Merge branch 'drm-next-5.3' of git://people.freedesktop.org/~agd5f/linux into drm-next

amdgpu:
- Revert timeline support until KHR is ready
- Various driver reload fixes
- Refactor clock handling in DC
- Aux fixes for DC
- Bandwidth calculation updates for DC
- Fix documentation due to file rename
- RAS fix
- Fix race in late_init

ttm:
- Allow for better forward progress when there is heavy memory contention

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190606032537.6939-1-alexander.deucher@amd.com
Dave Airlie 2019-06-06 13:47:40 +10:00
commit 396f9acaff
93 changed files with 3083 additions and 2120 deletions


@@ -37,10 +37,10 @@ Buffer Objects
PRIME Buffer Sharing
--------------------
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
:doc: PRIME Buffer Sharing
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
:internal:
MMU Notifier


@@ -922,7 +922,7 @@ struct amdgpu_device {
const struct amdgpu_df_funcs *df_funcs;
/* delayed work_func for deferring clockgating during resume */
struct delayed_work late_init_work;
struct delayed_work delayed_init_work;
struct amdgpu_virt virt;
/* firmware VRAM reservation */


@@ -585,7 +585,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
false, &ctx->duplicates);
false, &ctx->duplicates, true);
if (!ret)
ctx->reserved = true;
else {
@@ -658,7 +658,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
}
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
false, &ctx->duplicates);
false, &ctx->duplicates, true);
if (!ret)
ctx->reserved = true;
else
@@ -1808,7 +1808,8 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
}
/* Reserve all BOs and page tables for validation */
ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
true);
WARN(!list_empty(&duplicates), "Duplicates should be empty");
if (ret)
goto out_free;
@@ -2014,7 +2015,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
}
ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
false, &duplicate_save);
false, &duplicate_save, true);
if (ret) {
pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
goto ttm_reserve_fail;
@@ -2124,7 +2125,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
if (!*mem)
return -EINVAL;
return -ENOMEM;
mutex_init(&(*mem)->lock);
(*mem)->bo = amdgpu_bo_ref(gws_bo);
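
Every ttm_eu_reserve_buffers() call site in this pull gains a trailing boolean. This matches the TTM item in the summary ("allow for better forward progress when there is heavy memory contention"): reservation can now optionally leave BOs on the LRU. A hedged sketch of the extended prototype; the parameter name del_lru is an assumption based on the TTM rework this series sits on, not something visible in this diff:

/*
 * Sketch only: assumes the new trailing bool ("del_lru" -- name not
 * confirmed by this diff) selects whether successfully reserved BOs
 * are removed from the LRU immediately.
 */
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups, bool del_lru);

All amdgpu callers above pass true, preserving the old behavior.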


@@ -648,7 +648,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
&duplicates);
&duplicates, true);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("ttm_eu_reserve_buffers failed.\n");


@@ -79,7 +79,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
list_add(&csa_tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &pd);
r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
if (r) {
DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
return r;


@@ -1869,6 +1869,43 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
return 0;
}
static int amdgpu_device_enable_mgpu_fan_boost(void)
{
struct amdgpu_gpu_instance *gpu_ins;
struct amdgpu_device *adev;
int i, ret = 0;
mutex_lock(&mgpu_info.mutex);
/*
* MGPU fan boost feature should be enabled
* only when there are two or more dGPUs in
* the system
*/
if (mgpu_info.num_dgpu < 2)
goto out;
for (i = 0; i < mgpu_info.num_dgpu; i++) {
gpu_ins = &(mgpu_info.gpu_ins[i]);
adev = gpu_ins->adev;
if (!(adev->flags & AMD_IS_APU) &&
!gpu_ins->mgpu_fan_enabled &&
adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
if (ret)
break;
gpu_ins->mgpu_fan_enabled = 1;
}
}
out:
mutex_unlock(&mgpu_info.mutex);
return ret;
}
/**
* amdgpu_device_ip_late_init - run late init for hardware IPs
*
@@ -1902,11 +1939,15 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
queue_delayed_work(system_wq, &adev->late_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
amdgpu_device_fill_reset_magic(adev);
r = amdgpu_device_enable_mgpu_fan_boost();
if (r)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
/* set to low pstate by default */
amdgpu_xgmi_set_pstate(adev, 0);
return 0;
}
@@ -2005,65 +2046,20 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
return 0;
}
static int amdgpu_device_enable_mgpu_fan_boost(void)
{
struct amdgpu_gpu_instance *gpu_ins;
struct amdgpu_device *adev;
int i, ret = 0;
mutex_lock(&mgpu_info.mutex);
/*
* MGPU fan boost feature should be enabled
* only when there are two or more dGPUs in
* the system
*/
if (mgpu_info.num_dgpu < 2)
goto out;
for (i = 0; i < mgpu_info.num_dgpu; i++) {
gpu_ins = &(mgpu_info.gpu_ins[i]);
adev = gpu_ins->adev;
if (!(adev->flags & AMD_IS_APU) &&
!gpu_ins->mgpu_fan_enabled &&
adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
if (ret)
break;
gpu_ins->mgpu_fan_enabled = 1;
}
}
out:
mutex_unlock(&mgpu_info.mutex);
return ret;
}
/**
* amdgpu_device_ip_late_init_func_handler - work handler for ib test
* amdgpu_device_delayed_init_work_handler - work handler for IB tests
*
* @work: work_struct.
*/
static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, late_init_work.work);
container_of(work, struct amdgpu_device, delayed_init_work.work);
int r;
r = amdgpu_ib_ring_tests(adev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
r = amdgpu_device_enable_mgpu_fan_boost();
if (r)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
/*set to low pstate by default */
amdgpu_xgmi_set_pstate(adev, 0);
}
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -2535,8 +2531,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&adev->ring_lru_list);
spin_lock_init(&adev->ring_lru_list_lock);
INIT_DELAYED_WORK(&adev->late_init_work,
amdgpu_device_ip_late_init_func_handler);
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
amdgpu_device_delay_enable_gfx_off);
@@ -2749,6 +2745,9 @@ fence_driver_init:
/* must succeed. */
amdgpu_ras_resume(adev);
queue_delayed_work(system_wq, &adev->delayed_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
if (r) {
dev_err(adev->dev, "Could not create pcie_replay_count");
@@ -2796,7 +2795,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
}
adev->accel_working = false;
cancel_delayed_work_sync(&adev->late_init_work);
cancel_delayed_work_sync(&adev->delayed_init_work);
/* free i2c buses */
if (!amdgpu_device_has_dc_support(adev))
amdgpu_i2c_fini(adev);
@@ -2859,7 +2858,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
if (fbcon)
amdgpu_fbdev_set_suspend(adev, 1);
cancel_delayed_work_sync(&adev->late_init_work);
cancel_delayed_work_sync(&adev->delayed_init_work);
if (!amdgpu_device_has_dc_support(adev)) {
/* turn off display hw */
@@ -2979,6 +2978,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (r)
return r;
queue_delayed_work(system_wq, &adev->delayed_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
if (!amdgpu_device_has_dc_support(adev)) {
/* pin cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -3002,7 +3004,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
return r;
/* Make sure IB tests flushed */
flush_delayed_work(&adev->late_init_work);
flush_delayed_work(&adev->delayed_init_work);
/* blat the mode back in */
if (fbcon) {
@@ -3593,6 +3595,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
dev_info(adev->dev, "GPU reset begin!\n");
cancel_delayed_work_sync(&adev->delayed_init_work);
hive = amdgpu_get_xgmi_hive(adev, false);
/*
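
Taken together, this file's hunks close the late_init race called out in the summary: the delayed work is queued from the end of device init and from resume rather than from late_init itself, the mgpu fan boost and XGMI pstate setup move out of the work handler into late_init proper, and GPU reset now cancels the work before proceeding. A minimal sketch of the resulting lifecycle, built only from the workqueue calls that appear above:

/* Delayed-init work lifecycle after this change (sketch). */
INIT_DELAYED_WORK(&adev->delayed_init_work,
		  amdgpu_device_delayed_init_work_handler); /* device init */
queue_delayed_work(system_wq, &adev->delayed_init_work,
		   msecs_to_jiffies(AMDGPU_RESUME_MS));     /* init and resume */
flush_delayed_work(&adev->delayed_init_work);     /* before IB-dependent paths */
cancel_delayed_work_sync(&adev->delayed_init_work); /* suspend, fini, reset */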


@@ -1307,8 +1307,7 @@ static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_ATOMIC |
DRIVER_GEM |
DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
DRIVER_SYNCOBJ_TIMELINE,
DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
.load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,


@@ -171,7 +171,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, true);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
@@ -608,7 +608,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, true);
if (r)
goto error_unref;


@@ -974,7 +974,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
int r, pasid;
/* Ensure IB tests are run on ring */
flush_delayed_work(&adev->late_init_work);
flush_delayed_work(&adev->delayed_init_work);
file_priv->driver_priv = NULL;


@@ -128,6 +128,12 @@ const char *ras_block_string[] = {
#define AMDGPU_RAS_FLAG_INIT_NEED_RESET 2
#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
uint64_t offset, uint64_t size,
struct amdgpu_bo **bo_ptr);
static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
struct amdgpu_bo **bo_ptr);
static void amdgpu_ras_self_test(struct amdgpu_device *adev)
{
/* TODO */
@@ -307,6 +313,7 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
{
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
struct ras_debug_if data;
struct amdgpu_bo *bo;
int ret = 0;
ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
@@ -324,7 +331,16 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
break;
case 2:
ret = amdgpu_ras_reserve_vram(adev,
data.inject.address, PAGE_SIZE, &bo);
/* This address might be used already on failure. In fact we can
* perform an injection in such case.
*/
if (ret)
break;
data.inject.address = amdgpu_bo_gpu_offset(bo);
ret = amdgpu_ras_error_inject(adev, &data.inject);
amdgpu_ras_release_vram(adev, &bo);
break;
default:
ret = -EINVAL;


@@ -1794,9 +1794,7 @@ static int gfx_v9_0_sw_fini(void *handle)
gfx_v9_0_mec_fini(adev);
gfx_v9_0_ngg_fini(adev);
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
(void **)&adev->gfx.rlc.cs_ptr);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
if (adev->asic_type == CHIP_RAVEN) {
amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
&adev->gfx.rlc.cp_table_gpu_addr,
@@ -1929,17 +1927,17 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
if (i == 0) {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
} else {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
(adev->gmc.private_aperture_start >> 48));
tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
(adev->gmc.shared_aperture_start >> 48));
WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
}
}
soc15_grbm_select(adev, 0, 0, 0, 0);
@@ -3048,7 +3046,7 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
(adev->doorbell_index.userqueue_end * 2) << 2);
}
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
mqd->cp_hqd_pq_doorbell_control);
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */


@@ -146,12 +146,12 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
WREG32_SOC15(GC, 0, mmVM_L2_CNTL, tmp);
WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL, tmp);
tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL2);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp);
WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL2, tmp);
tmp = mmVM_L2_CNTL3_DEFAULT;
if (adev->gmc.translate_further) {
@@ -163,12 +163,12 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
}
WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp);
WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
WREG32_SOC15(GC, 0, mmVM_L2_CNTL4, tmp);
WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL4, tmp);
}
static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev)


@@ -1575,15 +1575,15 @@ static int kfd_ioctl_alloc_queue_gws(struct file *filep,
struct kfd_dev *dev;
if (!hws_gws_support)
return -EINVAL;
return -ENODEV;
dev = kfd_device_by_id(args->gpu_id);
if (!dev) {
pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
return -EINVAL;
return -ENODEV;
}
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
return -EINVAL;
return -ENODEV;
mutex_lock(&p->mutex);
retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);


@@ -103,7 +103,7 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
/* Only allow one queue per process can have GWS assigned */
if (gws && pdd->qpd.num_gws)
return -EINVAL;
return -EBUSY;
if (!gws && pdd->qpd.num_gws == 0)
return -EINVAL;


@@ -28,6 +28,7 @@ AMDDALPATH = $(RELATIVE_AMD_DISPLAY_PATH)
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/clk_mgr
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color


@@ -665,13 +665,11 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_VEGA20:
return 0;
case CHIP_RAVEN:
#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
if (ASICREV_IS_PICASSO(adev->external_rev_id))
fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
else
#endif
return 0;
break;
default:
@@ -3675,6 +3673,13 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
return ret;
}
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -3703,6 +3708,11 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
if (aconnector->i2c) {
i2c_del_adapter(&aconnector->i2c->base);
kfree(aconnector->i2c);
}
kfree(connector);
}
@@ -3760,7 +3770,8 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
.atomic_get_property = amdgpu_dm_connector_atomic_get_property
.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
.early_unregister = amdgpu_dm_connector_unregister
};
static int get_modes(struct drm_connector *connector)
@@ -4217,6 +4228,9 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
struct amdgpu_device *adev;
struct amdgpu_bo *rbo;
struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
struct list_head list;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
uint64_t tiling_flags;
uint32_t domain;
int r;
@@ -4233,9 +4247,17 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
obj = new_state->fb->obj[0];
rbo = gem_to_amdgpu_bo(obj);
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r != 0))
INIT_LIST_HEAD(&list);
tv.bo = &rbo->tbo;
tv.num_shared = 1;
list_add(&tv.head, &list);
r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true);
if (r) {
dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
return r;
}
if (plane->type != DRM_PLANE_TYPE_CURSOR)
domain = amdgpu_display_supported_domains(adev);
@@ -4246,21 +4268,21 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
amdgpu_bo_unreserve(rbo);
ttm_eu_backoff_reservation(&ticket, &list);
return r;
}
r = amdgpu_ttm_alloc_gart(&rbo->tbo);
if (unlikely(r != 0)) {
amdgpu_bo_unpin(rbo);
amdgpu_bo_unreserve(rbo);
ttm_eu_backoff_reservation(&ticket, &list);
DRM_ERROR("%p bind failed\n", rbo);
return r;
}
amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
amdgpu_bo_unreserve(rbo);
ttm_eu_backoff_reservation(&ticket, &list);
afb->address = amdgpu_bo_gpu_offset(rbo);
@@ -5254,6 +5276,11 @@ static void update_freesync_state_on_stream(
amdgpu_dm_vrr_active(new_crtc_state)) {
mod_freesync_handle_v_update(dm->freesync_module,
new_stream, &vrr_params);
/* Need to call this before the frame ends. */
dc_stream_adjust_vmin_vmax(dm->dc,
new_crtc_state->stream,
&vrr_params.adjust);
}
}
@@ -5592,11 +5619,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
}
if (acrtc_state->stream) {
if (acrtc_state->freesync_timing_changed)
bundle->stream_update.adjust =
&acrtc_state->stream->adjust;
if (acrtc_state->freesync_vrr_info_changed)
bundle->stream_update.vrr_infopacket =
&acrtc_state->stream->vrr_infopacket;
@@ -5617,6 +5639,20 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
bundle->stream_update.abm_level = &acrtc_state->abm_level;
/*
* If FreeSync state on the stream has changed then we need to
* re-adjust the min/max bounds now that DC doesn't handle this
* as part of commit.
*/
if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
amdgpu_dm_vrr_active(acrtc_state)) {
spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
dc_stream_adjust_vmin_vmax(
dm->dc, acrtc_state->stream,
&acrtc_state->vrr_params.adjust);
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(dm->dc,
bundle->surface_updates,
@@ -6487,6 +6523,10 @@ static bool should_reset_plane(struct drm_atomic_state *state,
if (!new_crtc_state)
return true;
/* CRTC Degamma changes currently require us to recreate planes. */
if (new_crtc_state->color_mgmt_changed)
return true;
if (drm_atomic_crtc_needs_modeset(new_crtc_state))
return true;


@@ -23,7 +23,7 @@
# Makefile for Display Core (dc) component.
#
DC_LIBS = basics bios calcs dce gpio irq virtual
DC_LIBS = basics bios calcs clk_mgr dce gpio irq virtual
ifdef CONFIG_DRM_AMD_DC_DCN1_0
DC_LIBS += dcn10 dml


@@ -26,6 +26,7 @@
#ifndef _DCN_CALC_AUTO_H_
#define _DCN_CALC_AUTO_H_
#include "dc.h"
#include "dcn_calcs.h"
void scaler_settings_calculation(struct dcn_bw_internal_vars *v);


@@ -24,11 +24,10 @@
*/
#include "dm_services.h"
#include "dc.h"
#include "dcn_calcs.h"
#include "dcn_calc_auto.h"
#include "dc.h"
#include "dal_asic_id.h"
#include "resource.h"
#include "dcn10/dcn10_resource.h"
#include "dcn10/dcn10_hubbub.h"
@@ -712,7 +711,7 @@ bool dcn_validate_bandwidth(
const struct resource_pool *pool = dc->res_pool;
struct dcn_bw_internal_vars *v = &context->dcn_bw_vars;
int i, input_idx;
int i, input_idx, k;
int vesa_sync_start, asic_blank_end, asic_blank_start;
bool bw_limit_pass;
float bw_limit;
@@ -873,8 +872,19 @@ bool dcn_validate_bandwidth(
v->lb_bit_per_pixel[input_idx] = 30;
v->viewport_width[input_idx] = pipe->stream->timing.h_addressable;
v->viewport_height[input_idx] = pipe->stream->timing.v_addressable;
v->scaler_rec_out_width[input_idx] = pipe->stream->timing.h_addressable;
v->scaler_recout_height[input_idx] = pipe->stream->timing.v_addressable;
/*
* for cases where we have no plane, we want to validate up to 1080p
* source size because here we are only interested in if the output
* timing is supported or not. if we cannot support native resolution
* of the high res display, we still want to support lower res up scale
* to native
*/
if (v->viewport_width[input_idx] > 1920)
v->viewport_width[input_idx] = 1920;
if (v->viewport_height[input_idx] > 1080)
v->viewport_height[input_idx] = 1080;
v->scaler_rec_out_width[input_idx] = v->viewport_width[input_idx];
v->scaler_recout_height[input_idx] = v->viewport_height[input_idx];
v->override_hta_ps[input_idx] = 1;
v->override_vta_ps[input_idx] = 1;
v->override_hta_pschroma[input_idx] = 1;
@@ -1023,6 +1033,43 @@ bool dcn_validate_bandwidth(
mode_support_and_system_configuration(v);
}
display_pipe_configuration(v);
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->source_scan[k] == dcn_bw_hor)
v->swath_width_y[k] = v->viewport_width[k] / v->dpp_per_plane[k];
else
v->swath_width_y[k] = v->viewport_height[k] / v->dpp_per_plane[k];
}
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
v->byte_per_pixel_dety[k] = 8.0;
v->byte_per_pixel_detc[k] = 0.0;
} else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
v->byte_per_pixel_dety[k] = 4.0;
v->byte_per_pixel_detc[k] = 0.0;
} else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
v->byte_per_pixel_dety[k] = 2.0;
v->byte_per_pixel_detc[k] = 0.0;
} else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
v->byte_per_pixel_dety[k] = 1.0;
v->byte_per_pixel_detc[k] = 2.0;
} else {
v->byte_per_pixel_dety[k] = 4.0f / 3.0f;
v->byte_per_pixel_detc[k] = 8.0f / 3.0f;
}
}
v->total_data_read_bandwidth = 0.0;
for (k = 0; k <= v->number_of_active_planes - 1; k++) {
v->read_bandwidth_plane_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] *
dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k];
v->read_bandwidth_plane_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] *
dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k] / 2.0;
v->total_data_read_bandwidth = v->total_data_read_bandwidth +
v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k];
}
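/*
 * Worked example for the loop above (hypothetical numbers, not from this
 * diff): one 1920-wide 32 bpp plane (dcn_bw_rgb_sub_32) with
 * dpp_per_plane = 1 gives swath_width_y = 1920 and byte_per_pixel_dety =
 * 4.0. With htotal = 2200 and a 148.5 MHz pixel clock the line time is
 * roughly 14.8 us, so at v_ratio = 1.0 the luma term is about
 * 1920 * 4 bytes / 14.8 us ~= 519 MB/s; the chroma term is zero because
 * byte_per_pixel_detc = 0.0 for RGB formats.
 */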
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
if (v->voltage_level != number_of_states_plus_one && !fast_validate) {


@@ -0,0 +1,75 @@
#
# Copyright 2017 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
#
# Makefile for the 'clk_mgr' sub-component of DAL.
# It provides the control and status of HW CLK_MGR pins.
CLK_MGR = clk_mgr.o
AMD_DAL_CLK_MGR = $(addprefix $(AMDDALPATH)/dc/clk_mgr/,$(CLK_MGR))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR)
###############################################################################
# DCE 100 and DCE8x
###############################################################################
CLK_MGR_DCE100 = dce_clk_mgr.o
AMD_DAL_CLK_MGR_DCE100 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce100/,$(CLK_MGR_DCE100))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE100)
###############################################################################
# DCE 110
###############################################################################
CLK_MGR_DCE110 = dce110_clk_mgr.o
AMD_DAL_CLK_MGR_DCE110 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce110/,$(CLK_MGR_DCE110))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE110)
###############################################################################
# DCE 112
###############################################################################
CLK_MGR_DCE112 = dce112_clk_mgr.o
AMD_DAL_CLK_MGR_DCE112 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce112/,$(CLK_MGR_DCE112))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE112)
###############################################################################
# DCE 120
###############################################################################
CLK_MGR_DCE120 = dce120_clk_mgr.o
AMD_DAL_CLK_MGR_DCE120 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce120/,$(CLK_MGR_DCE120))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE120)
ifdef CONFIG_DRM_AMD_DC_DCN1_0
###############################################################################
# DCN10
###############################################################################
CLK_MGR_DCN10 = rv1_clk_mgr.o rv1_clk_mgr_vbios_smu.o rv2_clk_mgr.o
AMD_DAL_CLK_MGR_DCN10 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn10/,$(CLK_MGR_DCN10))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN10)
endif


@@ -0,0 +1,134 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dal_asic_id.h"
#include "dc_types.h"
#include "dccg.h"
#include "clk_mgr_internal.h"
#include "dce100/dce_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
#include "dce112/dce112_clk_mgr.h"
#include "dce120/dce120_clk_mgr.h"
#include "dcn10/rv1_clk_mgr.h"
#include "dcn10/rv2_clk_mgr.h"
int clk_mgr_helper_get_active_display_cnt(
struct dc *dc,
struct dc_state *context)
{
int i, display_count;
display_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
/*
* Only notify active stream or virtual stream.
* Need to notify virtual stream to work around
* headless case. HPD does not fire when system is in
* S0i2.
*/
if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
display_count++;
}
return display_count;
}
struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg)
{
struct hw_asic_id asic_id = ctx->asic_id;
struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
if (clk_mgr == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
switch (asic_id.chip_family) {
case FAMILY_CI:
case FAMILY_KV:
dce_clk_mgr_construct(ctx, clk_mgr);
break;
case FAMILY_CZ:
dce110_clk_mgr_construct(ctx, clk_mgr);
break;
case FAMILY_VI:
if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) {
dce_clk_mgr_construct(ctx, clk_mgr);
break;
}
if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
dce112_clk_mgr_construct(ctx, clk_mgr);
break;
}
if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) {
dce112_clk_mgr_construct(ctx, clk_mgr);
break;
}
break;
case FAMILY_AI:
if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev))
dce121_clk_mgr_construct(ctx, clk_mgr);
else
dce120_clk_mgr_construct(ctx, clk_mgr);
break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case FAMILY_RV:
if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
break;
}
if (ASICREV_IS_RAVEN(asic_id.hw_internal_rev) ||
ASICREV_IS_PICASSO(asic_id.hw_internal_rev)) {
rv1_clk_mgr_construct(ctx, clk_mgr, pp_smu);
break;
}
break;
#endif /* Family RV */
default:
ASSERT(0); /* Unknown Asic */
break;
}
return &clk_mgr->base;
}
void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
kfree(clk_mgr);
}
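
The new file gives DC a small factory interface: dc_clk_mgr_create() dispatches on ASIC family to the per-generation construct functions, and dc_destroy_clk_mgr() frees the wrapping clk_mgr_internal. A minimal usage sketch using only entry points shown in this pull (error handling elided):

struct clk_mgr *clk_mgr = dc_clk_mgr_create(ctx, pp_smu, dccg);

if (clk_mgr) {
	/* e.g. reprogram clocks for a new state */
	clk_mgr->funcs->update_clocks(clk_mgr, context, safe_to_lower);
	dc_destroy_clk_mgr(clk_mgr);
}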


@@ -0,0 +1,471 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dccg.h"
#include "clk_mgr_internal.h"
#include "dce_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
#include "dce112/dce112_clk_mgr.h"
#include "reg_helper.h"
#include "dmcu.h"
#include "core_types.h"
#include "dal_asic_id.h"
/*
* Currently the register shifts and masks in this file are used for dce100 and dce80
* which has identical definitions.
* TODO: remove this when DPREFCLK_CNTL and dpref DENTIST_DISPCLK_CNTL
* is moved to dccg, where it belongs
*/
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
#define REG(reg) \
(clk_mgr->regs->reg)
#undef FN
#define FN(reg_name, field_name) \
clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
/* Max clock values for each state indexed by "enum clocks_state": */
static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
/* ClocksStateInvalid - should not be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
/* ClocksStateNominal */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
int dentist_get_divider_from_did(int did)
{
if (did < DENTIST_BASE_DID_1)
did = DENTIST_BASE_DID_1;
if (did > DENTIST_MAX_DID)
did = DENTIST_MAX_DID;
if (did < DENTIST_BASE_DID_2) {
return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
* (did - DENTIST_BASE_DID_1);
} else if (did < DENTIST_BASE_DID_3) {
return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
* (did - DENTIST_BASE_DID_2);
} else if (did < DENTIST_BASE_DID_4) {
return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
* (did - DENTIST_BASE_DID_3);
} else {
return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
* (did - DENTIST_BASE_DID_4);
}
}
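/*
 * Worked example: DID 0x41 falls in the second range (0x40-0x5f), so the
 * divider is DENTIST_DIVIDER_RANGE_2_START +
 * DENTIST_DIVIDER_RANGE_2_STEP * (0x41 - 0x40) = 64 + 2 = 66, which with
 * the scale factor of 4 from dce_clk_mgr.h encodes a real divide ratio
 * of 16.5.
 */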
/* SW will adjust DP REF Clock average value for all purposes
* (DP DTO / DP Audio DTO and DP GTC)
if clock is spread for all cases:
-if SS enabled on DP Ref clock and HW de-spreading enabled with SW
calculations for DS_INCR/DS_MODULO (this is planned to be default case)
-if SS enabled on DP Ref clock and HW de-spreading enabled with HW
calculations (not planned to be used, but average clock should still
be valid)
-if SS enabled on DP Ref clock and HW de-spreading disabled
(should not be case with CIK) then SW should program all rates
generated according to average value (case as with previous ASICs)
*/
int dce_adjust_dp_ref_freq_for_ss(struct clk_mgr_internal *clk_mgr_dce, int dp_ref_clk_khz)
{
if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) {
struct fixed31_32 ss_percentage = dc_fixpt_div_int(
dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage,
clk_mgr_dce->dprefclk_ss_divider), 200);
struct fixed31_32 adj_dp_ref_clk_khz;
ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
}
return dp_ref_clk_khz;
}
int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
int dprefclk_wdivider;
int dprefclk_src_sel;
int dp_ref_clk_khz = 600000;
int target_div;
/* ASSERT DP Reference Clock source is from DFS*/
REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
ASSERT(dprefclk_src_sel == 0);
/* Read the mmDENTIST_DISPCLK_CNTL to get the currently
 * programmed DID DENTIST_DPREFCLK_WDIVIDER */
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
/* Convert DENTIST_DPREFCLK_WDIVIDER to actual divider */
target_div = dentist_get_divider_from_did(dprefclk_wdivider);
/* Calculate the current DFS clock, in kHz. */
dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
* clk_mgr->dentist_vco_freq_khz) / target_div;
return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
}
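/*
 * Example: with the default 3.6 GHz DENTIST VCO and a programmed DID of
 * 0x24, the first range gives a divider of 8 + 1 * (0x24 - 0x08) = 36
 * (real ratio 9.0), so this returns 4 * 3600000 / 36 = 400000 kHz before
 * the spread-spectrum adjustment.
 */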
int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
return dce_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_base->dprefclk_khz);
}
/* unit: in_khz before mode set, get pixel clock from context. ASIC register
* may not be programmed yet
*/
uint32_t dce_get_max_pixel_clock_for_all_paths(struct dc_state *context)
{
uint32_t max_pix_clk = 0;
int i;
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream == NULL)
continue;
/* do not check under lay */
if (pipe_ctx->top_pipe)
continue;
if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 > max_pix_clk)
max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10;
/* raise clock state for HBR3/2 if required. Confirmed with HW DCE/DPCS
* logic for HBR3 still needs Nominal (0.8V) on VDDC rail
*/
if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
}
return max_pix_clk;
}
enum dm_pp_clocks_state dce_get_required_clocks_state(
struct clk_mgr *clk_mgr_base,
struct dc_state *context)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
int i;
enum dm_pp_clocks_state low_req_clk;
int max_pix_clk = dce_get_max_pixel_clock_for_all_paths(context);
/* Iterate from highest supported to lowest valid state, and update
* lowest RequiredState with the lowest state that satisfies
* all required clocks
*/
for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
if (context->bw_ctx.bw.dce.dispclk_khz >
clk_mgr_dce->max_clks_by_state[i].display_clk_khz
|| max_pix_clk >
clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
break;
low_req_clk = i + 1;
if (low_req_clk > clk_mgr_dce->max_clks_state) {
/* set max clock state for high phyclock, invalid on exceeding display clock */
if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
< context->bw_ctx.bw.dce.dispclk_khz)
low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
else
low_req_clk = clk_mgr_dce->max_clks_state;
}
return low_req_clk;
}
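/*
 * Example, using the dce110 state table added later in this pull
 * (352/352/467/643 MHz display clock for ULTRA_LOW/LOW/NOMINAL/
 * PERFORMANCE): a required dispclk of 400 MHz walks down from
 * PERFORMANCE and first fails at LOW (352000 < 400000), so low_req_clk
 * lands on NOMINAL.
 */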
/* TODO: remove use the two broken down functions */
int dce_set_clock(
struct clk_mgr *clk_mgr_base,
int requested_clk_khz)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
struct dc_bios *bp = clk_mgr_base->ctx->dc_bios;
int actual_clock = requested_clk_khz;
struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu;
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
clk_mgr_dce->dentist_vco_freq_khz / 64);
/* Prepare to program display clock*/
pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
if (clk_mgr_dce->dfs_bypass_active)
pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
if (clk_mgr_dce->dfs_bypass_active) {
/* Cache the fixed display clock*/
clk_mgr_dce->dfs_bypass_disp_clk =
pxl_clk_params.dfs_bypass_display_clock;
actual_clock = pxl_clk_params.dfs_bypass_display_clock;
}
/* from power down, we need mark the clock state as ClocksStateNominal
* from HWReset, so when resume we will call pplib voltage regulator.*/
if (requested_clk_khz == 0)
clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
return actual_clock;
}
static void dce_clock_read_integrated_info(struct clk_mgr_internal *clk_mgr_dce)
{
struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug;
struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
struct integrated_info info = { { { 0 } } };
struct dc_firmware_info fw_info = { { 0 } };
int i;
if (bp->integrated_info)
info = *bp->integrated_info;
clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
bp->funcs->get_firmware_info(bp, &fw_info);
clk_mgr_dce->dentist_vco_freq_khz =
fw_info.smu_gpu_pll_output_freq;
if (clk_mgr_dce->dentist_vco_freq_khz == 0)
clk_mgr_dce->dentist_vco_freq_khz = 3600000;
}
/*update the maximum display clock for each power state*/
for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
switch (i) {
case 0:
clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
break;
case 1:
clk_state = DM_PP_CLOCKS_STATE_LOW;
break;
case 2:
clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
break;
case 3:
clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
break;
default:
clk_state = DM_PP_CLOCKS_STATE_INVALID;
break;
}
/*Do not allow bad VBIOS/SBIOS to override with invalid values,
* check for > 100MHz*/
if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
info.disp_clk_voltage[i].max_supported_clk;
}
if (!debug->disable_dfs_bypass && bp->integrated_info)
if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
clk_mgr_dce->dfs_bypass_enabled = true;
}
void dce_clock_read_ss_info(struct clk_mgr_internal *clk_mgr_dce)
{
struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
int ss_info_num = bp->funcs->get_ss_entry_number(
bp, AS_SIGNAL_TYPE_GPU_PLL);
if (ss_info_num) {
struct spread_spectrum_info info = { { 0 } };
enum bp_result result = bp->funcs->get_spread_spectrum_info(
bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
/* Based on VBIOS, VBIOS will keep entry for GPU PLL SS
* even if SS not enabled and in that case
* SSInfo.spreadSpectrumPercentage !=0 would be sign
* that SS is enabled
*/
if (result == BP_RESULT_OK &&
info.spread_spectrum_percentage != 0) {
clk_mgr_dce->ss_on_dprefclk = true;
clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
if (info.type.CENTER_MODE == 0) {
/* TODO: Currently for DP Reference clock we
* need only SS percentage for
* downspread */
clk_mgr_dce->dprefclk_ss_percentage =
info.spread_spectrum_percentage;
}
return;
}
result = bp->funcs->get_spread_spectrum_info(
bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
/* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS
* even if SS not enabled and in that case
* SSInfo.spreadSpectrumPercentage !=0 would be sign
* that SS is enabled
*/
if (result == BP_RESULT_OK &&
info.spread_spectrum_percentage != 0) {
clk_mgr_dce->ss_on_dprefclk = true;
clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
if (info.type.CENTER_MODE == 0) {
/* Currently for DP Reference clock we
* need only SS percentage for
* downspread */
clk_mgr_dce->dprefclk_ss_percentage =
info.spread_spectrum_percentage;
}
}
}
}
static void dce_pplib_apply_display_requirements(
struct dc *dc,
struct dc_state *context)
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}
static void dce_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
patched_disp_clk = patched_disp_clk * 115 / 100;
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
if (dm_pp_apply_power_level_change_request(clk_mgr_base->ctx, &level_change_req))
clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
}
if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr_base->clks.dispclk_khz)) {
patched_disp_clk = dce_set_clock(clk_mgr_base, patched_disp_clk);
clk_mgr_base->clks.dispclk_khz = patched_disp_clk;
}
dce_pplib_apply_display_requirements(clk_mgr_base->ctx->dc, context);
}
static struct clk_mgr_funcs dce_funcs = {
.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
.update_clocks = dce_update_clocks
};
void dce_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr)
{
struct clk_mgr *base = &clk_mgr->base;
struct dm_pp_static_clock_info static_clk_info = {0};
memcpy(clk_mgr->max_clks_by_state,
dce80_max_clks_by_state,
sizeof(dce80_max_clks_by_state));
base->ctx = ctx;
base->funcs = &dce_funcs;
clk_mgr->regs = &disp_clk_regs;
clk_mgr->clk_mgr_shift = &disp_clk_shift;
clk_mgr->clk_mgr_mask = &disp_clk_mask;
clk_mgr->dfs_bypass_disp_clk = 0;
clk_mgr->dprefclk_ss_percentage = 0;
clk_mgr->dprefclk_ss_divider = 1000;
clk_mgr->ss_on_dprefclk = false;
if (dm_pp_get_static_clocks(ctx, &static_clk_info))
clk_mgr->max_clks_state = static_clk_info.max_clocks_state;
else
clk_mgr->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
dce_clock_read_integrated_info(clk_mgr);
dce_clock_read_ss_info(clk_mgr);
}


@@ -0,0 +1,81 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef _DCE_CLK_MGR_H_
#define _DCE_CLK_MGR_H_
#include "dc.h"
/* Starting DID for each range */
enum dentist_base_divider_id {
DENTIST_BASE_DID_1 = 0x08,
DENTIST_BASE_DID_2 = 0x40,
DENTIST_BASE_DID_3 = 0x60,
DENTIST_BASE_DID_4 = 0x7e,
DENTIST_MAX_DID = 0x7f
};
/* Starting point and step size for each divider range.*/
enum dentist_divider_range {
DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */
DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
};
/* functions shared by other dce clk mgrs */
int dce_adjust_dp_ref_freq_for_ss(struct clk_mgr_internal *clk_mgr_dce, int dp_ref_clk_khz);
int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base);
enum dm_pp_clocks_state dce_get_required_clocks_state(
struct clk_mgr *clk_mgr_base,
struct dc_state *context);
uint32_t dce_get_max_pixel_clock_for_all_paths(struct dc_state *context);
void dce_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr_dce);
void dce_clock_read_ss_info(struct clk_mgr_internal *dccg_dce);
int dce12_get_dp_ref_freq_khz(struct clk_mgr *dccg);
int dce_set_clock(
struct clk_mgr *clk_mgr_base,
int requested_clk_khz);
void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr);
int dentist_get_divider_from_did(int did);
#endif /* _DCE_CLK_MGR_H_ */


@@ -0,0 +1,276 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
#include "dce110_clk_mgr.h"
#include "../clk_mgr/dce100/dce_clk_mgr.h"
/* set register offset */
#define SR(reg_name)\
.reg_name = mm ## reg_name
/* set register offset with instance */
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/*ClocksStateLow*/
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
static int determine_sclk_from_bounding_box(
const struct dc *dc,
int required_sclk)
{
int i;
/*
* Some asics do not give us sclk levels, so we just report the actual
* required sclk
*/
if (dc->sclk_lvls.num_levels == 0)
return required_sclk;
for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
return dc->sclk_lvls.clocks_in_khz[i];
}
/*
* even maximum level could not satisfy requirement, this
* is unexpected at this stage, should have been caught at
* validation time
*/
ASSERT(0);
return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
}
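/*
 * Example: with sclk levels {300000, 600000} kHz and a required
 * 450000 kHz this returns 600000; with an empty level table the
 * required value passes through unchanged.
 */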
uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
{
uint8_t j;
uint32_t min_vertical_blank_time = -1;
for (j = 0; j < context->stream_count; j++) {
struct dc_stream_state *stream = context->streams[j];
uint32_t vertical_blank_in_pixels = 0;
uint32_t vertical_blank_time = 0;
vertical_blank_in_pixels = stream->timing.h_total *
(stream->timing.v_total
- stream->timing.v_addressable);
vertical_blank_time = vertical_blank_in_pixels
* 10000 / stream->timing.pix_clk_100hz;
if (min_vertical_blank_time > vertical_blank_time)
min_vertical_blank_time = vertical_blank_time;
}
return min_vertical_blank_time;
}
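/*
 * Worked example: a 2200 x 1125 timing with v_addressable = 1080 and
 * pix_clk_100hz = 1485000 (148.5 MHz) has 2200 * 45 = 99000 blanking
 * pixels, giving 99000 * 10000 / 1485000 ~= 666 us of vertical blank.
 */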
void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg)
{
int j;
int num_cfgs = 0;
for (j = 0; j < context->stream_count; j++) {
int k;
const struct dc_stream_state *stream = context->streams[j];
struct dm_pp_single_disp_config *cfg =
&pp_display_cfg->disp_configs[num_cfgs];
const struct pipe_ctx *pipe_ctx = NULL;
for (k = 0; k < MAX_PIPES; k++)
if (stream == context->res_ctx.pipe_ctx[k].stream) {
pipe_ctx = &context->res_ctx.pipe_ctx[k];
break;
}
ASSERT(pipe_ctx != NULL);
/* only notify active stream */
if (stream->dpms_off)
continue;
num_cfgs++;
cfg->signal = pipe_ctx->stream->signal;
cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
cfg->src_height = stream->src.height;
cfg->src_width = stream->src.width;
cfg->ddi_channel_mapping =
stream->link->ddi_channel_mapping.raw;
cfg->transmitter =
stream->link->link_enc->transmitter;
cfg->link_settings.lane_count =
stream->link->cur_link_settings.lane_count;
cfg->link_settings.link_rate =
stream->link->cur_link_settings.link_rate;
cfg->link_settings.link_spread =
stream->link->cur_link_settings.link_spread;
cfg->sym_clock = stream->phy_pix_clk;
/* Round v_refresh*/
cfg->v_refresh = stream->timing.pix_clk_100hz * 100;
cfg->v_refresh /= stream->timing.h_total;
cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
/ stream->timing.v_total;
}
pp_display_cfg->display_count = num_cfgs;
}
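/*
 * Continuing the 148.5 MHz, 2200 x 1125 example for the v_refresh
 * rounding above: 1485000 * 100 / 2200 = 67500, and
 * (67500 + 1125 / 2) / 1125 truncates to 60 Hz.
 */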
void dce11_pplib_apply_display_requirements(
struct dc *dc,
struct dc_state *context)
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
pp_display_cfg->all_displays_in_sync =
context->bw_ctx.bw.dce.all_displays_in_sync;
pp_display_cfg->nb_pstate_switch_disable =
context->bw_ctx.bw.dce.nbp_state_change_enable == false;
pp_display_cfg->cpu_cc6_disable =
context->bw_ctx.bw.dce.cpuc_state_change_enable == false;
pp_display_cfg->cpu_pstate_disable =
context->bw_ctx.bw.dce.cpup_state_change_enable == false;
pp_display_cfg->cpu_pstate_separation_time =
context->bw_ctx.bw.dce.blackout_recovery_time_us;
pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
/ MEMORY_TYPE_MULTIPLIER_CZ;
pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
dc,
context->bw_ctx.bw.dce.sclk_khz);
/*
* As workaround for >4x4K lightup set dcfclock to min_engine_clock value.
* This is not required for less than 5 displays,
* thus don't request decfclk in dc to avoid impact
* on power saving.
*
*/
pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4) ?
pp_display_cfg->min_engine_clock_khz : 0;
pp_display_cfg->min_engine_clock_deep_sleep_khz
= context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
pp_display_cfg->avail_mclk_switch_time_us =
dce110_get_min_vblank_time_us(context);
/* TODO: dce11.2*/
pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
dce110_fill_display_configs(context, pp_display_cfg);
/* TODO: is this still applicable?*/
if (pp_display_cfg->display_count == 1) {
const struct dc_crtc_timing *timing =
&context->streams[0]->timing;
pp_display_cfg->crtc_index =
pp_display_cfg->disp_configs[0].pipe_idx;
pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz;
}
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}
static void dce11_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
patched_disp_clk = patched_disp_clk * 115 / 100;
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
if (dm_pp_apply_power_level_change_request(clk_mgr_base->ctx, &level_change_req))
clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
}
if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr_base->clks.dispclk_khz)) {
context->bw_ctx.bw.dce.dispclk_khz = dce_set_clock(clk_mgr_base, patched_disp_clk);
clk_mgr_base->clks.dispclk_khz = patched_disp_clk;
}
dce11_pplib_apply_display_requirements(clk_mgr_base->ctx->dc, context);
}
static struct clk_mgr_funcs dce110_funcs = {
.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
.update_clocks = dce11_update_clocks
};
void dce110_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr)
{
memcpy(clk_mgr->max_clks_by_state,
dce110_max_clks_by_state,
sizeof(dce110_max_clks_by_state));
dce_clk_mgr_construct(ctx, clk_mgr);
clk_mgr->regs = &disp_clk_regs;
clk_mgr->clk_mgr_shift = &disp_clk_shift;
clk_mgr->clk_mgr_mask = &disp_clk_mask;
clk_mgr->base.funcs = &dce110_funcs;
}

View File

@ -0,0 +1,44 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef DAL_DC_DCE_DCE110_CLK_MGR_H_
#define DAL_DC_DCE_DCE110_CLK_MGR_H_
void dce110_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr);
void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg);
/* functions shared with other clk mgr*/
void dce11_pplib_apply_display_requirements(
struct dc *dc,
struct dc_state *context);
uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
#endif /* DAL_DC_DCE_DCE110_CLK_MGR_H_ */

View File

@ -0,0 +1,239 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "dce/dce_11_2_d.h"
#include "dce/dce_11_2_sh_mask.h"
#include "dce100/dce_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
#include "dce112_clk_mgr.h"
#include "dal_asic_id.h"
/* set register offset */
#define SR(reg_name)\
.reg_name = mm ## reg_name
/* set register offset with instance */
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - not supposed to be used currently, per the HW design team*/
{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
/*ClocksStateLow*/
{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
//TODO: remove this; use the two broken-down functions below instead
int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk_mgr_base->ctx->dc_bios;
struct dc *core_dc = clk_mgr_base->ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
int actual_clock = requested_clk_khz;
/* Prepare to program display clock*/
memset(&dce_clk_params, 0, sizeof(dce_clk_params));
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
clk_mgr_dce->dentist_vco_freq_khz / 62);
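/*
* Hedged example: with the 3600000 kHz DENTIST VCO fallback used by the
* RV1 constructor later in this series, this floor works out to roughly
* 3600000 / 62 ~= 58065 kHz; any smaller nonzero request is raised to
* that value.
*/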
dce_clk_params.target_clock_frequency = requested_clk_khz;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
bp->funcs->set_dce_clock(bp, &dce_clk_params);
actual_clock = dce_clk_params.target_clock_frequency;
/*
* Coming out of power down (HWReset), mark the clock state as
* ClocksStateNominal so that the pplib voltage regulator is called
* on resume.
*/
if (requested_clk_khz == 0)
clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
/*Program DP ref Clock*/
/*VBIOS will determine DPREFCLK frequency, so we don't set it*/
dce_clk_params.target_clock_frequency = 0;
dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
if (!ASICREV_IS_VEGA20_P(clk_mgr_base->ctx->asic_id.hw_internal_rev))
dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
(dce_clk_params.pll_id ==
CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
else
dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
bp->funcs->set_dce_clock(bp, &dce_clk_params);
if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
dmcu->funcs->set_psr_wait_loop(dmcu,
actual_clock / 1000 / 7);
}
}
clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
return actual_clock;
}
int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz)
{
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk_mgr->base.ctx->dc_bios;
struct dc *core_dc = clk_mgr->base.ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
int actual_clock = requested_clk_khz;
/* Prepare to program display clock*/
memset(&dce_clk_params, 0, sizeof(dce_clk_params));
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
clk_mgr->dentist_vco_freq_khz / 62);
dce_clk_params.target_clock_frequency = requested_clk_khz;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
bp->funcs->set_dce_clock(bp, &dce_clk_params);
actual_clock = dce_clk_params.target_clock_frequency;
/*
* Coming out of power down (HWReset), mark the clock state as
* ClocksStateNominal so that the pplib voltage regulator is called
* on resume.
*/
if (requested_clk_khz == 0)
clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr->dfs_bypass_disp_clk != actual_clock)
dmcu->funcs->set_psr_wait_loop(dmcu,
actual_clock / 1000 / 7);
}
}
clk_mgr->dfs_bypass_disp_clk = actual_clock;
return actual_clock;
}
int dce112_set_dprefclk(struct clk_mgr_internal *clk_mgr)
{
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk_mgr->base.ctx->dc_bios;
memset(&dce_clk_params, 0, sizeof(dce_clk_params));
/*Program DP ref Clock*/
/*VBIOS will determine DPREFCLK frequency, so we don't set it*/
dce_clk_params.target_clock_frequency = 0;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
if (!ASICREV_IS_VEGA20_P(clk_mgr->base.ctx->asic_id.hw_internal_rev))
dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
(dce_clk_params.pll_id ==
CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
else
dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
bp->funcs->set_dce_clock(bp, &dce_clk_params);
/* Returns the dp_refclk that was set */
return dce_clk_params.target_clock_frequency;
}
static void dce112_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
patched_disp_clk = patched_disp_clk * 115 / 100;
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
if (dm_pp_apply_power_level_change_request(clk_mgr_base->ctx, &level_change_req))
clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
}
if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr_base->clks.dispclk_khz)) {
patched_disp_clk = dce112_set_clock(clk_mgr_base, patched_disp_clk);
clk_mgr_base->clks.dispclk_khz = patched_disp_clk;
}
dce11_pplib_apply_display_requirements(clk_mgr_base->ctx->dc, context);
}
static struct clk_mgr_funcs dce112_funcs = {
.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
.update_clocks = dce112_update_clocks
};
void dce112_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr)
{
memcpy(clk_mgr->max_clks_by_state,
dce112_max_clks_by_state,
sizeof(dce112_max_clks_by_state));
dce_clk_mgr_construct(ctx, clk_mgr);
clk_mgr->regs = &disp_clk_regs;
clk_mgr->clk_mgr_shift = &disp_clk_shift;
clk_mgr->clk_mgr_mask = &disp_clk_mask;
clk_mgr->base.funcs = &dce112_funcs;
}

View File

@ -0,0 +1,39 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef DAL_DC_DCE_DCE112_CLK_MGR_H_
#define DAL_DC_DCE_DCE112_CLK_MGR_H_
void dce112_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr);
/* functions shared with other clk mgr */
int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz);
int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz);
int dce112_set_dprefclk(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_DCE_DCE112_CLK_MGR_H_ */

View File

@ -0,0 +1,145 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "dce112/dce112_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
#include "dce120_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - not supposed to be used currently, per the HW design team*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateLow*/
{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
/**
* dce121_clock_patch_xgmi_ss_info() - Save XGMI spread spectrum info
* @clk_mgr_base: clock manager base structure
*
* Reads the XGMI spread spectrum info from VBIOS and saves it within
* the dce clock manager. This operation overwrites the existing dprefclk
* SS values if the VBIOS query succeeds; otherwise, it does nothing. It
* also sets the ->xgmi_enabled flag.
*/
void dce121_clock_patch_xgmi_ss_info(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
enum bp_result result;
struct spread_spectrum_info info = { { 0 } };
struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
clk_mgr_dce->xgmi_enabled = false;
result = bp->funcs->get_spread_spectrum_info(bp, AS_SIGNAL_TYPE_XGMI,
0, &info);
if (result == BP_RESULT_OK && info.spread_spectrum_percentage != 0) {
clk_mgr_dce->xgmi_enabled = true;
clk_mgr_dce->ss_on_dprefclk = true;
clk_mgr_dce->dprefclk_ss_divider =
info.spread_percentage_divider;
if (info.type.CENTER_MODE == 0) {
/*
* Currently for DP Reference clock we
* need only SS percentage for
* downspread
*/
clk_mgr_dce->dprefclk_ss_percentage =
info.spread_spectrum_percentage;
}
}
}
static void dce12_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
int max_pix_clk = dce_get_max_pixel_clock_for_all_paths(context);
int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
patched_disp_clk = patched_disp_clk * 115 / 100;
if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr_base->clks.dispclk_khz)) {
clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
/*
* When xGMI is enabled, the display clk needs to be adjusted
* with the WAFL link's SS percentage.
*/
if (clk_mgr_dce->xgmi_enabled)
patched_disp_clk = dce_adjust_dp_ref_freq_for_ss(
clk_mgr_dce, patched_disp_clk);
clock_voltage_req.clocks_in_khz = patched_disp_clk;
clk_mgr_base->clks.dispclk_khz = dce112_set_clock(clk_mgr_base, patched_disp_clk);
dm_pp_apply_clock_for_voltage_request(clk_mgr_base->ctx, &clock_voltage_req);
}
if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr_base->clks.phyclk_khz)) {
clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
clock_voltage_req.clocks_in_khz = max_pix_clk;
clk_mgr_base->clks.phyclk_khz = max_pix_clk;
dm_pp_apply_clock_for_voltage_request(clk_mgr_base->ctx, &clock_voltage_req);
}
dce11_pplib_apply_display_requirements(clk_mgr_base->ctx->dc, context);
}
static struct clk_mgr_funcs dce120_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dce12_update_clocks
};
void dce120_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr)
{
memcpy(clk_mgr->max_clks_by_state,
dce120_max_clks_by_state,
sizeof(dce120_max_clks_by_state));
dce_clk_mgr_construct(ctx, clk_mgr);
clk_mgr->base.dprefclk_khz = 600000;
clk_mgr->base.funcs = &dce120_funcs;
}
void dce121_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr)
{
dce120_clk_mgr_construct(ctx, clk_mgr);
clk_mgr->base.dprefclk_khz = 625000;
}

View File

@ -0,0 +1,34 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef DAL_DC_DCE_DCE120_CLK_MGR_H_
#define DAL_DC_DCE_DCE120_CLK_MGR_H_
void dce120_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr);
void dce121_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_DCE_DCE120_CLK_MGR_H_ */

View File

@ -0,0 +1,262 @@
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "rv1_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
#include "dce112/dce112_clk_mgr.h"
#include "rv1_clk_mgr_vbios_smu.h"
#include "rv1_clk_mgr_clk.h"
static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, struct dc_clocks *new_clocks)
{
bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->base.clks.dispclk_khz;
int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
bool cur_dpp_div = clk_mgr->base.clks.dispclk_khz > clk_mgr->base.clks.dppclk_khz;
/* increasing clock: current div is 0, requested div is 1 */
if (dispclk_increase) {
/* already divided by 2, no need to reach target clk with 2 steps*/
if (cur_dpp_div)
return new_clocks->dispclk_khz;
/* requested disp clk is lower than the maximum supported dpp clk,
* no need to reach the target clk in two steps.
*/
if (new_clocks->dispclk_khz <= disp_clk_threshold)
return new_clocks->dispclk_khz;
/* target dpp clk is not requested to be divided by 2, still within threshold */
if (!request_dpp_div)
return new_clocks->dispclk_khz;
} else {
/* decreasing clock: current dppclk is divided by 2,
* requested dppclk is not.
*/
/* current dpp clk not divided by 2, no need to ramp*/
if (!cur_dpp_div)
return new_clocks->dispclk_khz;
/* current disp clk is lower than current maximum dpp clk,
* no need to ramp
*/
if (clk_mgr->base.clks.dispclk_khz <= disp_clk_threshold)
return new_clocks->dispclk_khz;
/* request dpp clk need to be divided by 2 */
if (request_dpp_div)
return new_clocks->dispclk_khz;
}
return disp_clk_threshold;
}
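/*
* Worked example with assumed numbers, for illustration only: ramping
* dispclk up from 300000 kHz to 600000 kHz with a requested dppclk of
* 300000 kHz and max_supported_dppclk_khz = 400000. request_dpp_div is
* true (600000 > 300000) and cur_dpp_div is false, so none of the early
* returns hit and the 400000 threshold is returned:
* ramp_up_dispclk_with_dpp() first sets dispclk to 400000, programs the
* DPP divider, then jumps to the final 600000.
*/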
static void ramp_up_dispclk_with_dpp(struct clk_mgr_internal *clk_mgr, struct dc *dc, struct dc_clocks *new_clocks)
{
int i;
int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks);
bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
/* set disp clk to dpp clk threshold */
clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold);
clk_mgr->funcs->set_dprefclk(clk_mgr);
/* update request dpp clk division option */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
if (!pipe_ctx->plane_state)
continue;
pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
pipe_ctx->plane_res.dpp,
request_dpp_div,
true);
}
/* If target clk not same as dppclk threshold, set to target clock */
if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz) {
clk_mgr->funcs->set_dispclk(clk_mgr, new_clocks->dispclk_khz);
clk_mgr->funcs->set_dprefclk(clk_mgr);
}
clk_mgr->base.clks.dispclk_khz = new_clocks->dispclk_khz;
clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
clk_mgr->base.clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
}
static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc *dc = clk_mgr_base->ctx->dc;
struct dc_debug_options *debug = &dc->debug;
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct pp_smu_funcs_rv *pp_smu = NULL;
bool send_request_to_increase = false;
bool send_request_to_lower = false;
int display_count;
bool enter_display_off = false;
ASSERT(clk_mgr->pp_smu);
pp_smu = &clk_mgr->pp_smu->rv_funcs;
display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
if (display_count == 0)
enter_display_off = true;
if (enter_display_off == safe_to_lower) {
/*
* Notify the SMU of the active display count; if the function
* pointer is not set up, this message is sent as part of
* pplib_apply_display_requirements instead.
*/
if (pp_smu->set_display_count)
pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
}
if (new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz
|| new_clocks->phyclk_khz > clk_mgr_base->clks.phyclk_khz
|| new_clocks->fclk_khz > clk_mgr_base->clks.fclk_khz
|| new_clocks->dcfclk_khz > clk_mgr_base->clks.dcfclk_khz)
send_request_to_increase = true;
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {
clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
send_request_to_lower = true;
}
// F Clock
if (debug->force_fclk_khz != 0)
new_clocks->fclk_khz = debug->force_fclk_khz;
if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr_base->clks.fclk_khz)) {
clk_mgr_base->clks.fclk_khz = new_clocks->fclk_khz;
send_request_to_lower = true;
}
//DCF Clock
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
send_request_to_lower = true;
}
if (should_set_clock(safe_to_lower,
new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
send_request_to_lower = true;
}
/* make sure dcfclk is raised before dppclk so there is
* enough voltage to run dppclk
*/
if (send_request_to_increase) {
/*use dcfclk to request voltage*/
if (pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
}
}
/* dcn1 dppclk is tied to dispclk */
/* program dispclk even when it is unchanged (==) as a w/a for sleep/resume clock ramping issues */
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)
|| new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) {
ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
send_request_to_lower = true;
}
if (!send_request_to_increase && send_request_to_lower) {
/*use dcfclk to request voltage*/
if (pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
}
}
}
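/*
* Ordering note, summarizing the flow above rather than adding behavior:
* the hard-min fclk/dcfclk and deep-sleep dcfclk requests go to the SMU
* before clocks are raised and only after they are lowered, so voltage
* is always sufficient for the programmed dppclk. The
* (dcfclk_deep_sleep_khz + 999) / 1000 expression rounds the kHz value
* up to whole MHz for the SMU interface.
*/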
static struct clk_mgr_funcs rv1_clk_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = rv1_update_clocks,
};
static struct clk_mgr_internal_funcs rv1_clk_internal_funcs = {
.set_dispclk = rv1_vbios_smu_set_dispclk,
.set_dprefclk = dce112_set_dprefclk
};
void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu)
{
struct dc_debug_options *debug = &ctx->dc->debug;
struct dc_bios *bp = ctx->dc_bios;
struct dc_firmware_info fw_info = { { 0 } };
clk_mgr->base.ctx = ctx;
clk_mgr->pp_smu = pp_smu;
clk_mgr->base.funcs = &rv1_clk_funcs;
clk_mgr->funcs = &rv1_clk_internal_funcs;
clk_mgr->dfs_bypass_disp_clk = 0;
clk_mgr->dprefclk_ss_percentage = 0;
clk_mgr->dprefclk_ss_divider = 1000;
clk_mgr->ss_on_dprefclk = false;
clk_mgr->base.dprefclk_khz = 600000;
if (bp->integrated_info)
clk_mgr->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
if (clk_mgr->dentist_vco_freq_khz == 0) {
bp->funcs->get_firmware_info(bp, &fw_info);
clk_mgr->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
if (clk_mgr->dentist_vco_freq_khz == 0)
clk_mgr->dentist_vco_freq_khz = 3600000;
}
if (!debug->disable_dfs_bypass && bp->integrated_info)
if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
clk_mgr->dfs_bypass_enabled = true;
dce_clock_read_ss_info(clk_mgr);
}

View File

@ -0,0 +1,31 @@
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __RV1_CLK_MGR_H__
#define __RV1_CLK_MGR_H__
void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu);
#endif //__RV1_CLK_MGR_H__

View File

@ -0,0 +1,79 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"
#include "clk_mgr_internal.h"
#include "rv1_clk_mgr_clk.h"
#include "ip/Discovery/hwid.h"
#include "ip/Discovery/v1/ip_offset_1.h"
#include "ip/CLK/clk_10_0_default.h"
#include "ip/CLK/clk_10_0_offset.h"
#include "ip/CLK/clk_10_0_reg.h"
#include "ip/CLK/clk_10_0_sh_mask.h"
#include "dce100/dce_clk_mgr.h"
#define CLK_BASE_INNER(inst) \
CLK_BASE__INST ## inst ## _SEG0
#define CLK_REG(reg_name, block, inst)\
CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## _ ## inst ## _ ## reg_name
#define REG(reg_name) \
CLK_REG(reg_name, CLK0, 0)
/* Only used by testing framework*/
void rv1_dump_clk_registers(struct clk_state_registers *regs, struct clk_bypass *bypass, struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
regs->CLK0_CLK8_CURRENT_CNT = REG_READ(CLK0_CLK8_CURRENT_CNT) / 10; //dcf clk
bypass->dcfclk_bypass = REG_READ(CLK0_CLK8_BYPASS_CNTL) & 0x0007;
if (bypass->dcfclk_bypass < 0 || bypass->dcfclk_bypass > 4)
bypass->dcfclk_bypass = 0;
regs->CLK0_CLK8_DS_CNTL = REG_READ(CLK0_CLK8_DS_CNTL) / 10; //dcf deep sleep divider
regs->CLK0_CLK8_ALLOW_DS = REG_READ(CLK0_CLK8_ALLOW_DS); //dcf deep sleep allow
regs->CLK0_CLK10_CURRENT_CNT = REG_READ(CLK0_CLK10_CURRENT_CNT) / 10; //dpref clk
bypass->dispclk_pypass = REG_READ(CLK0_CLK10_BYPASS_CNTL) & 0x0007;
if (bypass->dispclk_pypass < 0 || bypass->dispclk_pypass > 4)
bypass->dispclk_pypass = 0;
regs->CLK0_CLK11_CURRENT_CNT = REG_READ(CLK0_CLK11_CURRENT_CNT) / 10; //disp clk
bypass->dprefclk_bypass = REG_READ(CLK0_CLK11_BYPASS_CNTL) & 0x0007;
if (bypass->dprefclk_bypass < 0 || bypass->dprefclk_bypass > 4)
bypass->dprefclk_bypass = 0;
}

View File

@ -0,0 +1,29 @@
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef DAL_DC_DCN10_RV1_CLK_MGR_CLK_H_
#define DAL_DC_DCN10_RV1_CLK_MGR_CLK_H_
#endif /* DAL_DC_DCN10_RV1_CLK_MGR_CLK_H_ */

View File

@ -0,0 +1,126 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
#define MAX_INSTANCE 5
#define MAX_SEGMENT 5
struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};
struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};
static const struct IP_BASE MP1_BASE = { { { { 0x00016000, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
#define mmMP1_SMN_C2PMSG_91 0x29B
#define mmMP1_SMN_C2PMSG_83 0x293
#define mmMP1_SMN_C2PMSG_67 0x283
#define mmMP1_SMN_C2PMSG_91_BASE_IDX 0
#define mmMP1_SMN_C2PMSG_83_BASE_IDX 0
#define mmMP1_SMN_C2PMSG_67_BASE_IDX 0
#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xffffffffL
#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xffffffffL
#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xffffffffL
#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x00000000
#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x00000000
#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x00000000
#define REG(reg_name) \
(MP1_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
#define FN(reg_name, field) \
FD(reg_name##__##field)
#define VBIOSSMC_MSG_SetDispclkFreq 0x4
#define VBIOSSMC_MSG_SetDprefclkFreq 0x5
int rv1_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned int msg_id, unsigned int param)
{
/* First clear response register */
REG_WRITE(MP1_SMN_C2PMSG_91, 0);
/* Set the parameter register for the SMU message, unit is MHz */
REG_WRITE(MP1_SMN_C2PMSG_83, param);
/* Trigger the message transaction by writing the message ID */
REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
REG_WAIT(MP1_SMN_C2PMSG_91, CONTENT, 1, 10, 200000);
/* The updated parameter (e.g. the granted frequency) is read back from the parameter register */
return REG_READ(MP1_SMN_C2PMSG_83);
}
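/*
* Minimal usage sketch (hypothetical values, not part of this change):
* requesting a 600 MHz display clock through the mailbox above would be
*
*     int granted_mhz = rv1_vbios_smu_send_msg_with_param(clk_mgr,
*             VBIOSSMC_MSG_SetDispclkFreq, 600);
*
* REG_WAIT polls MP1_SMN_C2PMSG_91 until the SMU acknowledges the
* message, and the granted frequency is then read back from
* MP1_SMN_C2PMSG_83.
*/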
int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
{
int actual_dispclk_set_mhz = -1;
struct dc *core_dc = clk_mgr->base.ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
/* Unit of the SMU msg parameter is MHz */
actual_dispclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDispclkFreq,
requested_dispclk_khz / 1000);
/* Actual dispclk set is returned in the parameter register, in MHz */
actual_dispclk_set_mhz = REG_READ(MP1_SMN_C2PMSG_83);
if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
dmcu->funcs->set_psr_wait_loop(dmcu,
actual_dispclk_set_mhz / 7);
}
}
return actual_dispclk_set_mhz * 1000;
}
int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
{
int actual_dprefclk_set_mhz = -1;
actual_dprefclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDprefclkFreq,
clk_mgr->base.dprefclk_khz / 1000);
/* TODO: add code for programming the DP DTO; currently this is done by the command table */
return actual_dprefclk_set_mhz * 1000;
}

View File

@ -0,0 +1,32 @@
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_
#define DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_
int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
int rv1_vbios_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */

View File

@ -0,0 +1,43 @@
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "rv1_clk_mgr.h"
#include "rv2_clk_mgr.h"
#include "dce112/dce112_clk_mgr.h"
static struct clk_mgr_internal_funcs rv2_clk_internal_funcs = {
.set_dispclk = dce112_set_dispclk,
.set_dprefclk = dce112_set_dprefclk
};
void rv2_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu)
{
rv1_clk_mgr_construct(ctx, clk_mgr, pp_smu);
clk_mgr->funcs = &rv2_clk_internal_funcs;
}

View File

@ -23,17 +23,10 @@
*
*/
#ifndef __DCN10_CLK_MGR_H__
#define __DCN10_CLK_MGR_H__
#ifndef __RV2_CLK_MGR_H__
#define __RV2_CLK_MGR_H__
#include "../dce/dce_clk_mgr.h"
void rv2_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu);
struct clk_bypass {
uint32_t dcfclk_bypass;
uint32_t dispclk_pypass;
uint32_t dprefclk_bypass;
};
struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx);
#endif //__RV2_CLK_MGR_H__

View File

@ -33,6 +33,7 @@
#include "resource.h"
#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"
@ -262,7 +263,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream == stream && pipe->stream_res.stream_enc) {
if (pipe->stream == stream && pipe->stream_res.tg) {
pipe->stream->adjust = *adjust;
dc->hwss.set_drr(&pipe,
1,
@ -489,128 +490,6 @@ void dc_stream_set_static_screen_events(struct dc *dc,
dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}
void dc_link_set_drive_settings(struct dc *dc,
struct link_training_settings *lt_settings,
const struct dc_link *link)
{
int i;
for (i = 0; i < dc->link_count; i++) {
if (dc->links[i] == link)
break;
}
if (i >= dc->link_count)
ASSERT_CRITICAL(false);
dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
}
void dc_link_perform_link_training(struct dc *dc,
struct dc_link_settings *link_setting,
bool skip_video_pattern)
{
int i;
for (i = 0; i < dc->link_count; i++)
dc_link_dp_perform_link_training(
dc->links[i],
link_setting,
skip_video_pattern);
}
void dc_link_set_preferred_link_settings(struct dc *dc,
struct dc_link_settings *link_setting,
struct dc_link *link)
{
int i;
struct pipe_ctx *pipe;
struct dc_stream_state *link_stream;
struct dc_link_settings store_settings = *link_setting;
link->preferred_link_setting = store_settings;
/* Retrain with preferred link settings only relevant for
* DP signal type
*/
if (!dc_is_dp_signal(link->connector_signal))
return;
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->stream->link) {
if (pipe->stream->link == link)
break;
}
}
/* Stream not found */
if (i == MAX_PIPES)
return;
link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
/* Cannot retrain link if backend is off */
if (link_stream->dpms_off)
return;
if (link_stream)
decide_link_settings(link_stream, &store_settings);
if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) &&
(store_settings.link_rate != LINK_RATE_UNKNOWN))
dp_retrain_link_dp_test(link, &store_settings, false);
}
void dc_link_enable_hpd(const struct dc_link *link)
{
dc_link_dp_enable_hpd(link);
}
void dc_link_disable_hpd(const struct dc_link *link)
{
dc_link_dp_disable_hpd(link);
}
void dc_link_set_test_pattern(struct dc_link *link,
enum dp_test_pattern test_pattern,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size)
{
if (link != NULL)
dc_link_dp_set_test_pattern(
link,
test_pattern,
p_link_settings,
p_custom_pattern,
cust_pattern_size);
}
uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_setting)
{
uint32_t link_bw_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */
link_bw_kbps *= 8; /* 8 bits per byte*/
link_bw_kbps *= link_setting->lane_count;
return link_bw_kbps;
}
const struct dc_link_settings *dc_link_get_link_cap(
const struct dc_link *link)
{
if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
return &link->preferred_link_setting;
return &link->verified_link_cap;
}
static void destruct(struct dc *dc)
{
dc_release_state(dc->current_state);
@ -618,6 +497,11 @@ static void destruct(struct dc *dc)
destroy_links(dc);
if (dc->clk_mgr) {
dc_destroy_clk_mgr(dc->clk_mgr);
dc->clk_mgr = NULL;
}
dc_destroy_resource_pool(dc);
if (dc->ctx->gpio_service)
@ -761,6 +645,10 @@ static bool construct(struct dc *dc,
if (!dc->res_pool)
goto fail;
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr)
goto fail;
/* Creation of current_state must occur after dc->dml
* is initialized in dc_create_resource_pool because
* on creation it copies the contents of dc->dml
@ -1735,13 +1623,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
pipe_ctx->stream &&
pipe_ctx->stream == stream) {
/* Fast update*/
// VRR program can be done as part of FAST UPDATE
if (stream_update->adjust)
dc->hwss.set_drr(&pipe_ctx, 1,
stream_update->adjust->v_total_min,
stream_update->adjust->v_total_max);
if (stream_update->periodic_interrupt0 &&
dc->hwss.setup_periodic_interrupt)
dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0);
@ -1899,6 +1780,20 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
}
// Fire manual trigger only when bottom plane is flipped
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
if (pipe_ctx->bottom_pipe ||
!pipe_ctx->stream ||
pipe_ctx->stream != stream ||
!pipe_ctx->plane_state->update_flags.bits.addr_update)
continue;
if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
}
}
void dc_commit_updates_for_stream(struct dc *dc,

View File

@ -42,6 +42,7 @@
#include "fixed31_32.h"
#include "dpcd_defs.h"
#include "dmcu.h"
#include "hw/clk_mgr.h"
#define DC_LOGGER_INIT(logger)
@ -2338,7 +2339,8 @@ void core_link_resume(struct dc_link *link)
static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
{
struct fixed31_32 mbytes_per_sec;
uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link, &stream->link->cur_link_settings);
uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link,
&stream->link->cur_link_settings);
link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */
mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec);
@ -2857,3 +2859,127 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
return kbps;
}
void dc_link_set_drive_settings(struct dc *dc,
struct link_training_settings *lt_settings,
const struct dc_link *link)
{
int i;
for (i = 0; i < dc->link_count; i++) {
if (dc->links[i] == link)
break;
}
if (i >= dc->link_count)
ASSERT_CRITICAL(false);
dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
}
void dc_link_perform_link_training(struct dc *dc,
struct dc_link_settings *link_setting,
bool skip_video_pattern)
{
int i;
for (i = 0; i < dc->link_count; i++)
dc_link_dp_perform_link_training(
dc->links[i],
link_setting,
skip_video_pattern);
}
void dc_link_set_preferred_link_settings(struct dc *dc,
struct dc_link_settings *link_setting,
struct dc_link *link)
{
int i;
struct pipe_ctx *pipe;
struct dc_stream_state *link_stream;
struct dc_link_settings store_settings = *link_setting;
link->preferred_link_setting = store_settings;
/* Retrain with preferred link settings only relevant for
* DP signal type
*/
if (!dc_is_dp_signal(link->connector_signal))
return;
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->stream->link) {
if (pipe->stream->link == link)
break;
}
}
/* Stream not found */
if (i == MAX_PIPES)
return;
link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
/* Cannot retrain link if backend is off */
if (link_stream->dpms_off)
return;
if (link_stream)
decide_link_settings(link_stream, &store_settings);
if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) &&
(store_settings.link_rate != LINK_RATE_UNKNOWN))
dp_retrain_link_dp_test(link, &store_settings, false);
}
void dc_link_enable_hpd(const struct dc_link *link)
{
dc_link_dp_enable_hpd(link);
}
void dc_link_disable_hpd(const struct dc_link *link)
{
dc_link_dp_disable_hpd(link);
}
void dc_link_set_test_pattern(struct dc_link *link,
enum dp_test_pattern test_pattern,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size)
{
if (link != NULL)
dc_link_dp_set_test_pattern(
link,
test_pattern,
p_link_settings,
p_custom_pattern,
cust_pattern_size);
}
uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_setting)
{
uint32_t link_bw_kbps =
link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */
link_bw_kbps *= 8; /* 8 bits per byte*/
link_bw_kbps *= link_setting->lane_count;
return link_bw_kbps;
}
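/*
* Worked example (assuming LINK_RATE_REF_FREQ_IN_KHZ is 27000): an HBR2
* link (link_rate = 0x14 = 20) over 4 lanes gives 20 * 27000 = 540000
* kByte/s per lane, * 8 = 4320000 kbps per lane, * 4 lanes =
* 17280000 kbps, i.e. the familiar 17.28 Gbps DP payload figure.
*/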
const struct dc_link_settings *dc_link_get_link_cap(
const struct dc_link *link)
{
if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
return &link->preferred_link_setting;
return &link->verified_link_cap;
}

View File

@ -2361,6 +2361,7 @@ static bool retrieve_link_cap(struct dc_link *link)
/*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
*/
uint8_t dpcd_dprx_data = '\0';
uint8_t dpcd_power_state = '\0';
struct dp_device_vendor_id sink_id;
union down_stream_port_count down_strm_port_count;
@ -2377,6 +2378,17 @@ static bool retrieve_link_cap(struct dc_link *link)
memset(&edp_config_cap, '\0',
sizeof(union edp_configuration_cap));
status = core_link_read_dpcd(link, DP_SET_POWER,
&dpcd_power_state, sizeof(dpcd_power_state));
/* Delay 1 ms if the AUX CH is in the power-down state. Per spec
* section 2.3.1.2, the AUX CH may be powered down by a write of 2
* to DPCD 600h. The sink's AUX CH monitors the differential signal
* and may need up to 1 ms before it is able to reply.
*/
if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3)
udelay(1000);
for (i = 0; i < read_dpcd_retry_cnt; i++) {
status = core_link_read_dpcd(
link,

View File

@ -2018,7 +2018,7 @@ void dc_resource_state_construct(
const struct dc *dc,
struct dc_state *dst_ctx)
{
dst_ctx->clk_mgr = dc->res_pool->clk_mgr;
dst_ctx->clk_mgr = dc->clk_mgr;
}
/**

View File

@ -179,6 +179,9 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
if (new_stream->out_transfer_func)
dc_transfer_func_retain(new_stream->out_transfer_func);
new_stream->stream_id = new_stream->ctx->dc_stream_id_count;
new_stream->ctx->dc_stream_id_count++;
kref_init(&new_stream->refcount);
return new_stream;
@ -229,7 +232,7 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
unsigned int us_per_line;
if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
ASIC_REV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx);
if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))

View File

@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
#define DC_VER "3.2.31"
#define DC_VER "3.2.32"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@ -367,6 +367,7 @@ struct dc_bounding_box_overrides {
int urgent_latency_ns;
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
int min_dcfclk_mhz;
};
struct dc_state;
@ -387,6 +388,8 @@ struct dc {
struct dc_state *current_state;
struct resource_pool *res_pool;
struct clk_mgr *clk_mgr;
/* Display Engine Clock levels */
struct dm_pp_clock_levels sclk_lvls;

View File

@ -172,7 +172,6 @@ struct dc_stream_update {
struct periodic_interrupt_config *periodic_interrupt0;
struct periodic_interrupt_config *periodic_interrupt1;
struct dc_crtc_timing_adjust *adjust;
struct dc_info_packet *vrr_infopacket;
struct dc_info_packet *vsc_infopacket;
struct dc_info_packet *vsp_infopacket;

View File

@ -28,7 +28,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
dce_clk_mgr.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))

View File

@ -58,6 +58,9 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
uint32_t rampingBoundary = 0xFFFF;
if (abm->dmcu_is_running == false)
return true;
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
1, 80000);
@ -302,6 +305,9 @@ static bool dce_abm_set_level(struct abm *abm, uint32_t level)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
if (abm->dmcu_is_running == false)
return true;
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
1, 80000);
@ -320,6 +326,9 @@ static bool dce_abm_immediate_disable(struct abm *abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
if (abm->dmcu_is_running == false)
return true;
dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY);
abm->stored_backlight_registers.BL_PWM_CNTL =
@ -443,6 +452,7 @@ static void dce_abm_construct(
base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
base->dmcu_is_running = false;
abm_dce->regs = regs;
abm_dce->abm_shift = abm_shift;
@ -473,6 +483,7 @@ void dce_abm_destroy(struct abm **abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
if (abm_dce->base.dmcu_is_running == true)
abm_dce->base.funcs->set_abm_immediate_disable(*abm);
kfree(abm_dce);

File diff suppressed because it is too large

View File

@ -33,6 +33,7 @@
#include "include/logger_interface.h"
#include "dce_clock_source.h"
#include "clk_mgr.h"
#include "reg_helper.h"

View File

@ -388,6 +388,9 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
/* Set initialized ramping boundary value */
REG_WRITE(MASTER_COMM_DATA_REG1, 0xFFFF);
/* Set backlight ramping stepsize */
REG_WRITE(MASTER_COMM_DATA_REG2, abm_gain_stepsize);
/* Set command to initialize microcontroller */
REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
MCP_INIT_DMCU);
@ -813,6 +816,9 @@ void dce_dmcu_destroy(struct dmcu **dmcu)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu);
if (dmcu_dce->base.dmcu_state == DMCU_RUNNING)
dmcu_dce->base.funcs->set_psr_enable(*dmcu, false, true);
kfree(dmcu_dce);
*dmcu = NULL;
}

View File

@ -263,4 +263,6 @@ struct dmcu *dcn10_dmcu_create(
void dce_dmcu_destroy(struct dmcu **dmcu);
static const uint32_t abm_gain_stepsize = 0x0060;
#endif /* _DCE_ABM_H_ */

View File

@ -1124,19 +1124,6 @@ union audio_cea_channels {
} channels;
};
struct audio_clock_info {
/* pixel clock frequency*/
uint32_t pixel_clock_in_10khz;
/* N - 32KHz audio */
uint32_t n_32khz;
/* CTS - 32KHz audio*/
uint32_t cts_32khz;
uint32_t n_44khz;
uint32_t cts_44khz;
uint32_t n_48khz;
uint32_t cts_48khz;
};
/* 25.2MHz/1.001*/
/* 25.2MHz*/

View File

@ -25,6 +25,7 @@
#include "dm_services.h"
#include "dc.h"
#include "core_types.h"
#include "clk_mgr.h"
#include "hw_sequencer.h"
#include "dce100_hw_sequencer.h"
#include "resource.h"
@ -111,8 +112,8 @@ void dce100_prepare_bandwidth(
{
dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
dc->res_pool->clk_mgr->funcs->update_clocks(
dc->res_pool->clk_mgr,
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
false);
}
@ -123,8 +124,8 @@ void dce100_optimize_bandwidth(
{
dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
dc->res_pool->clk_mgr->funcs->update_clocks(
dc->res_pool->clk_mgr,
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
true);
}

View File

@ -35,8 +35,6 @@
#include "irq/dce110/irq_service_dce110.h"
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
#include "dce/dce_clk_mgr.h"
#include "dce/dce_mem_input.h"
#include "dce/dce_ipp.h"
#include "dce/dce_transform.h"
@ -137,19 +135,6 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
#define ipp_regs(id)\
[id] = {\
IPP_DCE100_REG_LIST_DCE_BASE(id)\
@ -746,9 +731,6 @@ static void destruct(struct dce110_resource_pool *pool)
dce_aud_destroy(&pool->base.audios[i]);
}
if (pool->base.clk_mgr != NULL)
dce_clk_mgr_destroy(&pool->base.clk_mgr);
if (pool->base.abm != NULL)
dce_abm_destroy(&pool->base.abm);
@ -974,16 +956,6 @@ static bool construct(
}
}
pool->base.clk_mgr = dce_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}
pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,

View File

@ -46,6 +46,7 @@
#include "link_encoder.h"
#include "link_hwss.h"
#include "clock_source.h"
#include "clk_mgr.h"
#include "abm.h"
#include "audio.h"
#include "reg_helper.h"
@ -960,6 +961,9 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
struct pp_smu_funcs *pp_smu = NULL;
unsigned int i, num_audio = 1;
if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)
return;
if (core_dc->res_pool->pp_smu)
pp_smu = core_dc->res_pool->pp_smu;
@ -979,6 +983,8 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
/* TODO: audio should be per stream rather than per link */
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, false);
if (pipe_ctx->stream_res.audio)
pipe_ctx->stream_res.audio->enabled = true;
}
}
@ -987,6 +993,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct pp_smu_funcs *pp_smu = NULL;
if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false)
return;
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
@ -1020,6 +1029,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
/* dal_audio_disable_azalia_audio_jack_presence(stream->audio,
* stream->stream_engine_id);
*/
if (pipe_ctx->stream_res.audio)
pipe_ctx->stream_res.audio->enabled = false;
}
}
@ -2313,6 +2324,7 @@ static void init_hw(struct dc *dc)
struct dc_bios *bp;
struct transform *xfm;
struct abm *abm;
struct dmcu *dmcu;
bp = dc->ctx->dc_bios;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@ -2340,9 +2352,6 @@ static void init_hw(struct dc *dc)
* default signal on connector). */
struct dc_link *link = dc->links[i];
if (link->link_enc->connector.id == CONNECTOR_ID_EDP)
dc->hwss.edp_power_control(link, true);
link->link_enc->funcs->hw_init(link->link_enc);
}
@ -2368,6 +2377,10 @@ static void init_hw(struct dc *dc)
abm->funcs->abm_init(abm);
}
dmcu = dc->res_pool->dmcu;
if (dmcu != NULL && abm != NULL)
abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
if (dc->fbc_compressor)
dc->fbc_compressor->funcs->power_up_fbc(dc->fbc_compressor);
@ -2378,7 +2391,7 @@ void dce110_prepare_bandwidth(
struct dc *dc,
struct dc_state *context)
{
struct clk_mgr *dccg = dc->res_pool->clk_mgr;
struct clk_mgr *dccg = dc->clk_mgr;
dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
@ -2392,7 +2405,7 @@ void dce110_optimize_bandwidth(
struct dc *dc,
struct dc_state *context)
{
struct clk_mgr *dccg = dc->res_pool->clk_mgr;
struct clk_mgr *dccg = dc->clk_mgr;
dce110_set_displaymarks(dc, context);

View File

@ -30,8 +30,6 @@
#include "resource.h"
#include "dce110/dce110_resource.h"
#include "dce/dce_clk_mgr.h"
#include "include/irq_service_interface.h"
#include "dce/dce_audio.h"
#include "dce110/dce110_timing_generator.h"
@ -149,18 +147,6 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
static const struct dce_dmcu_registers dmcu_regs = {
DMCU_DCE110_COMMON_REG_LIST()
};
@ -811,9 +797,6 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
if (pool->base.clk_mgr != NULL)
dce_clk_mgr_destroy(&pool->base.clk_mgr);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
}
@ -1346,16 +1329,6 @@ static bool construct(
}
}
pool->base.clk_mgr = dce110_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}
pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,

View File

@ -34,8 +34,6 @@
#include "dce110/dce110_timing_generator.h"
#include "irq/dce110/irq_service_dce110.h"
#include "dce/dce_clk_mgr.h"
#include "dce/dce_mem_input.h"
#include "dce/dce_transform.h"
#include "dce/dce_link_encoder.h"
@ -148,19 +146,6 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
static const struct dce_dmcu_registers dmcu_regs = {
DMCU_DCE110_COMMON_REG_LIST()
};
@ -774,9 +759,6 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
if (pool->base.clk_mgr != NULL)
dce_clk_mgr_destroy(&pool->base.clk_mgr);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
}
@ -1225,16 +1207,6 @@ static bool construct(
}
}
pool->base.clk_mgr = dce112_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}
pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,

View File

@ -46,8 +46,7 @@
#include "dce110/dce110_hw_sequencer.h"
#include "dce120/dce120_hw_sequencer.h"
#include "dce/dce_transform.h"
#include "dce/dce_clk_mgr.h"
#include "clk_mgr.h"
#include "dce/dce_audio.h"
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
@ -609,9 +608,6 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
if (pool->base.clk_mgr != NULL)
dce_clk_mgr_destroy(&pool->base.clk_mgr);
}
static void read_dce_straps(
@ -1048,17 +1044,6 @@ static bool construct(
}
}
if (is_vg20)
pool->base.clk_mgr = dce121_clk_mgr_create(ctx);
else
pool->base.clk_mgr = dce120_clk_mgr_create(ctx);
if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto dccg_create_fail;
}
pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
@ -1186,7 +1171,7 @@ static bool construct(
* here.
*/
if (is_vg20 && dce121_xgmi_enabled(dc->hwseq))
dce121_clock_patch_xgmi_ss_info(pool->base.clk_mgr);
dce121_clock_patch_xgmi_ss_info(dc->clk_mgr);
/* Create hardware sequencer */
if (!dce120_hw_sequencer_create(dc))
@ -1205,7 +1190,6 @@ static bool construct(
irqs_create_fail:
controller_create_fail:
dccg_create_fail:
clk_src_create_fail:
res_create_fail:

View File

@ -37,7 +37,6 @@
#include "dce110/dce110_timing_generator.h"
#include "dce110/dce110_resource.h"
#include "dce80/dce80_timing_generator.h"
#include "dce/dce_clk_mgr.h"
#include "dce/dce_mem_input.h"
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
@ -154,19 +153,6 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
#define ipp_regs(id)\
[id] = {\
IPP_COMMON_REG_LIST_DCE_BASE(id)\
@ -802,9 +788,6 @@ static void destruct(struct dce110_resource_pool *pool)
}
}
if (pool->base.clk_mgr != NULL)
dce_clk_mgr_destroy(&pool->base.clk_mgr);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
}
@ -955,16 +938,6 @@ static bool dce80_construct(
}
}
pool->base.clk_mgr = dce_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}
pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
@ -1164,16 +1137,6 @@ static bool dce81_construct(
}
}
pool->base.clk_mgr = dce_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}
pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,
@ -1369,16 +1332,6 @@ static bool dce83_construct(
}
}
pool->base.clk_mgr = dce_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
}
pool->base.dmcu = dce_dmcu_create(ctx,
&dmcu_regs,
&dmcu_shift,

View File

@ -24,7 +24,7 @@
DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \
dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
dcn10_hubp.o dcn10_mpc.o dcn10_clk_mgr.o \
dcn10_hubp.o dcn10_mpc.o \
dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o

View File

@ -1,355 +0,0 @@
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn10_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dal_asic_id.h"
#define TO_DCE_CLK_MGR(clocks)\
container_of(clocks, struct dce_clk_mgr, base)
#define REG(reg) \
(clk_mgr_dce->regs->reg)
#undef FN
#define FN(reg_name, field_name) \
clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name
#define CTX \
clk_mgr_dce->base.ctx
#define DC_LOGGER \
clk_mgr->ctx->logger
static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
{
bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz;
int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
bool cur_dpp_div = clk_mgr->clks.dispclk_khz > clk_mgr->clks.dppclk_khz;
/* increase clock, looking for div is 0 for current, request div is 1*/
if (dispclk_increase) {
/* already divided by 2, no need to reach target clk with 2 steps*/
if (cur_dpp_div)
return new_clocks->dispclk_khz;
/* request disp clk is lower than maximum supported dpp clk,
* no need to reach target clk with two steps.
*/
if (new_clocks->dispclk_khz <= disp_clk_threshold)
return new_clocks->dispclk_khz;
/* target dpp clk not requested to be divided by 2, still within threshold */
if (!request_dpp_div)
return new_clocks->dispclk_khz;
} else {
/* decrease clock, looking for current dppclk divided by 2,
* request dppclk not divided by 2.
*/
/* current dpp clk not divided by 2, no need to ramp*/
if (!cur_dpp_div)
return new_clocks->dispclk_khz;
/* current disp clk is lower than current maximum dpp clk,
* no need to ramp
*/
if (clk_mgr->clks.dispclk_khz <= disp_clk_threshold)
return new_clocks->dispclk_khz;
/* request dpp clk need to be divided by 2 */
if (request_dpp_div)
return new_clocks->dispclk_khz;
}
return disp_clk_threshold;
}
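
A minimal standalone condensation of the threshold decision above, with invented clock values (nothing below is taken from an ASIC table); it shows why an 800 MHz dispclk request ramps through a 600 MHz dpp threshold in two steps:

#include <stdbool.h>
#include <stdio.h>

struct clks { int dispclk_khz, dppclk_khz, max_supported_dppclk_khz; };

static int determine_threshold(struct clks cur, struct clks req)
{
	bool request_dpp_div = req.dispclk_khz > req.dppclk_khz;
	bool cur_dpp_div = cur.dispclk_khz > cur.dppclk_khz;

	if (req.dispclk_khz > cur.dispclk_khz) {
		/* increase: one step unless the dpp divider must flip to 1 */
		if (cur_dpp_div ||
		    req.dispclk_khz <= req.max_supported_dppclk_khz ||
		    !request_dpp_div)
			return req.dispclk_khz;
	} else {
		/* decrease: one step unless the dpp divider must flip to 0 */
		if (!cur_dpp_div ||
		    cur.dispclk_khz <= req.max_supported_dppclk_khz ||
		    request_dpp_div)
			return req.dispclk_khz;
	}
	return req.max_supported_dppclk_khz;
}

int main(void)
{
	struct clks cur = { 400000, 400000, 600000 };
	struct clks req = { 800000, 400000, 600000 };

	/* prints 600000: the ramp stops at the dpp threshold, the divider
	 * is programmed, then dispclk continues to 800000 in step two
	 */
	printf("%d\n", determine_threshold(cur, req));
	return 0;
}
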
static void dcn1_ramp_up_dispclk_with_dpp(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
{
int i;
struct dc *dc = clk_mgr->ctx->dc;
int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(clk_mgr, new_clocks);
bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
/* set disp clk to dpp clk threshold */
if (clk_mgr->funcs->set_dispclk && clk_mgr->funcs->set_dprefclk) {
clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold);
clk_mgr->funcs->set_dprefclk(clk_mgr);
} else
dce112_set_clock(clk_mgr, dispclk_to_dpp_threshold);
/* update request dpp clk division option */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
if (!pipe_ctx->plane_state)
continue;
pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
pipe_ctx->plane_res.dpp,
request_dpp_div,
true);
}
/* If target clk not same as dppclk threshold, set to target clock */
if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz) {
if (clk_mgr->funcs->set_dispclk && clk_mgr->funcs->set_dprefclk) {
clk_mgr->funcs->set_dispclk(clk_mgr, new_clocks->dispclk_khz);
clk_mgr->funcs->set_dprefclk(clk_mgr);
} else
dce112_set_clock(clk_mgr, dispclk_to_dpp_threshold);
}
clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
clk_mgr->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
}
static int get_active_display_cnt(
struct dc *dc,
struct dc_state *context)
{
int i, display_count;
display_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
/*
* Only notify active stream or virtual stream.
* Need to notify virtual stream to work around
* headless case. HPD does not fire when system is in
* S0i2.
*/
if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
display_count++;
}
return display_count;
}
static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
struct dc_state *context,
bool safe_to_lower)
{
struct dc *dc = clk_mgr->ctx->dc;
struct dc_debug_options *debug = &dc->debug;
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct pp_smu_funcs_rv *pp_smu = NULL;
bool send_request_to_increase = false;
bool send_request_to_lower = false;
int display_count;
bool enter_display_off = false;
display_count = get_active_display_cnt(dc, context);
if (dc->res_pool->pp_smu)
pp_smu = &dc->res_pool->pp_smu->rv_funcs;
if (display_count == 0)
enter_display_off = true;
if (enter_display_off == safe_to_lower) {
/*
* Notify SMU active displays
* if function pointer not set up, this message is
* sent as part of pplib_apply_display_requirements.
*/
if (pp_smu && pp_smu->set_display_count)
pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
}
if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz
|| new_clocks->phyclk_khz > clk_mgr->clks.phyclk_khz
|| new_clocks->fclk_khz > clk_mgr->clks.fclk_khz
|| new_clocks->dcfclk_khz > clk_mgr->clks.dcfclk_khz)
send_request_to_increase = true;
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
send_request_to_lower = true;
}
// F Clock
if (debug->force_fclk_khz != 0)
new_clocks->fclk_khz = debug->force_fclk_khz;
if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) {
clk_mgr->clks.fclk_khz = new_clocks->fclk_khz;
send_request_to_lower = true;
}
//DCF Clock
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
send_request_to_lower = true;
}
if (should_set_clock(safe_to_lower,
new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
send_request_to_lower = true;
}
/* make sure dcf clk is raised before dpp clk to
* make sure we have enough voltage to run dpp clk
*/
if (send_request_to_increase) {
/*use dcfclk to request voltage*/
if (pp_smu && pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
}
}
/* dcn1 dppclk is tied to dispclk */
/* program dispclk even on == (no change) as a w/a for sleep/resume clock ramping issues */
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)
|| new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) {
dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks);
clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
send_request_to_lower = true;
}
if (!send_request_to_increase && send_request_to_lower) {
/*use dcfclk to request voltage*/
if (pp_smu && pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
}
}
}
#define VBIOSSMC_MSG_SetDispclkFreq 0x4
#define VBIOSSMC_MSG_SetDprefclkFreq 0x5
int dcn10_set_dispclk(struct clk_mgr *clk_mgr_base, int requested_dispclk_khz)
{
int actual_dispclk_set_khz = -1;
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr_base);
/* First clear response register */
//dm_write_reg(ctx, mmMP1_SMN_C2PMSG_91, 0);
REG_WRITE(MP1_SMN_C2PMSG_91, 0);
/* Set the parameter register for the SMU message, unit is MHz */
//dm_write_reg(ctx, mmMP1_SMN_C2PMSG_83, requested_dispclk_khz / 1000);
REG_WRITE(MP1_SMN_C2PMSG_83, requested_dispclk_khz / 1000);
/* Trigger the message transaction by writing the message ID */
//dm_write_reg(ctx, mmMP1_SMN_C2PMSG_67, VBIOSSMC_MSG_SetDispclkFreq);
REG_WRITE(MP1_SMN_C2PMSG_67, VBIOSSMC_MSG_SetDispclkFreq);
REG_WAIT(MP1_SMN_C2PMSG_91, CONTENT, 1, 10, 200000);
/* Actual dispclk set is returned in the parameter register */
actual_dispclk_set_khz = REG_READ(MP1_SMN_C2PMSG_83) * 1000;
return actual_dispclk_set_khz;
}
int dcn10_set_dprefclk(struct clk_mgr *clk_mgr_base)
{
int actual_dprefclk_set_khz = -1;
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr_base);
REG_WRITE(MP1_SMN_C2PMSG_91, 0);
/* Set the parameter register for the SMU message */
REG_WRITE(MP1_SMN_C2PMSG_83, clk_mgr_dce->dprefclk_khz / 1000);
/* Trigger the message transaction by writing the message ID */
REG_WRITE(MP1_SMN_C2PMSG_67, VBIOSSMC_MSG_SetDprefclkFreq);
/* Wait for SMU response */
REG_WAIT(MP1_SMN_C2PMSG_91, CONTENT, 1, 10, 200000);
actual_dprefclk_set_khz = REG_READ(MP1_SMN_C2PMSG_83) * 1000;
return actual_dprefclk_set_khz;
}
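
Both setters above follow one SMU mailbox handshake; the sketch below factors it out for reference, reusing the REG_* helpers defined at the top of this file (smu_mailbox_sketch, msg_id and param_mhz are illustrative names, not a real kernel API):

static int smu_mailbox_sketch(struct dce_clk_mgr *clk_mgr_dce,
			      unsigned int msg_id, unsigned int param_mhz)
{
	REG_WRITE(MP1_SMN_C2PMSG_91, 0);		/* 1. clear response reg */
	REG_WRITE(MP1_SMN_C2PMSG_83, param_mhz);	/* 2. write argument, MHz */
	REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);		/* 3. trigger by msg ID */
	REG_WAIT(MP1_SMN_C2PMSG_91, CONTENT, 1, 10, 200000); /* 4. poll done */
	return REG_READ(MP1_SMN_C2PMSG_83);		/* 5. read result, MHz */
}
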
int (*set_dispclk)(struct pp_smu *pp_smu, int dispclk);
int (*set_dprefclk)(struct pp_smu *pp_smu);
static struct clk_mgr_funcs dcn1_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn1_update_clocks
};
struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx)
{
struct dc_debug_options *debug = &ctx->dc->debug;
struct dc_bios *bp = ctx->dc_bios;
struct dc_firmware_info fw_info = { { 0 } };
struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
if (clk_mgr_dce == NULL) {
BREAK_TO_DEBUGGER();
return NULL;
}
clk_mgr_dce->base.ctx = ctx;
clk_mgr_dce->base.funcs = &dcn1_funcs;
clk_mgr_dce->dfs_bypass_disp_clk = 0;
clk_mgr_dce->dprefclk_ss_percentage = 0;
clk_mgr_dce->dprefclk_ss_divider = 1000;
clk_mgr_dce->ss_on_dprefclk = false;
clk_mgr_dce->dprefclk_khz = 600000;
if (bp->integrated_info)
clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
bp->funcs->get_firmware_info(bp, &fw_info);
clk_mgr_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
if (clk_mgr_dce->dentist_vco_freq_khz == 0)
clk_mgr_dce->dentist_vco_freq_khz = 3600000;
}
if (!debug->disable_dfs_bypass && bp->integrated_info)
if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
clk_mgr_dce->dfs_bypass_enabled = true;
dce_clock_read_ss_info(clk_mgr_dce);
return &clk_mgr_dce->base;
}

View File

@ -38,6 +38,22 @@
type exp_resion_start_segment;\
type field_region_linear_slope
#define TF_HELPER_REG_LIST \
uint32_t start_cntl_b; \
uint32_t start_cntl_g; \
uint32_t start_cntl_r; \
uint32_t start_slope_cntl_b; \
uint32_t start_slope_cntl_g; \
uint32_t start_slope_cntl_r; \
uint32_t start_end_cntl1_b; \
uint32_t start_end_cntl2_b; \
uint32_t start_end_cntl1_g; \
uint32_t start_end_cntl2_g; \
uint32_t start_end_cntl1_r; \
uint32_t start_end_cntl2_r; \
uint32_t region_start; \
uint32_t region_end
#define TF_CM_REG_FIELD_LIST(type) \
type csc_c11; \
type csc_c12
@ -54,20 +70,7 @@ struct xfer_func_reg {
struct xfer_func_shift shifts;
struct xfer_func_mask masks;
uint32_t start_cntl_b;
uint32_t start_cntl_g;
uint32_t start_cntl_r;
uint32_t start_slope_cntl_b;
uint32_t start_slope_cntl_g;
uint32_t start_slope_cntl_r;
uint32_t start_end_cntl1_b;
uint32_t start_end_cntl2_b;
uint32_t start_end_cntl1_g;
uint32_t start_end_cntl2_g;
uint32_t start_end_cntl1_r;
uint32_t start_end_cntl2_r;
uint32_t region_start;
uint32_t region_end;
TF_HELPER_REG_LIST;
};
struct cm_color_matrix_shift {

View File

@ -45,6 +45,8 @@
#include "dcn10_cm_common.h"
#include "dc_link_dp.h"
#include "dccg.h"
#include "clk_mgr.h"
#define DC_LOGGER_INIT(logger)
@ -1100,9 +1102,6 @@ static void dcn10_init_hw(struct dc *dc)
*/
struct dc_link *link = dc->links[i];
if (link->link_enc->connector.id == CONNECTOR_ID_EDP)
dc->hwss.edp_power_control(link, true);
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */
@ -1144,6 +1143,9 @@ static void dcn10_init_hw(struct dc *dc)
if (dmcu != NULL)
dmcu->funcs->dmcu_init(dmcu);
if (abm != NULL && dmcu != NULL)
abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
REG_WRITE(DIO_MEM_PWR_CTRL, 0);
@ -1158,7 +1160,7 @@ static void dcn10_init_hw(struct dc *dc)
enable_power_gating_plane(dc->hwseq, true);
memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks));
memset(&dc->clk_mgr->clks, 0, sizeof(dc->clk_mgr->clks));
}
static void dcn10_reset_hw_ctx_wrap(
@ -2070,7 +2072,7 @@ void update_dchubp_dpp(
*/
if (plane_state->update_flags.bits.full_update) {
bool should_divided_by_2 = context->bw_ctx.bw.dcn.clk.dppclk_khz <=
dc->res_pool->clk_mgr->clks.dispclk_khz / 2;
dc->clk_mgr->clks.dispclk_khz / 2;
dpp->funcs->dpp_dppclk_control(
dpp,
@ -2083,9 +2085,9 @@ void update_dchubp_dpp(
dpp->inst,
pipe_ctx->plane_res.bw.dppclk_khz);
else
dc->res_pool->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
dc->res_pool->clk_mgr->clks.dispclk_khz / 2 :
dc->res_pool->clk_mgr->clks.dispclk_khz;
dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
dc->clk_mgr->clks.dispclk_khz / 2 :
dc->clk_mgr->clks.dispclk_khz;
}
/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
@ -2448,8 +2450,8 @@ static void dcn10_prepare_bandwidth(
if (context->stream_count == 0)
context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
dc->res_pool->clk_mgr->funcs->update_clocks(
dc->res_pool->clk_mgr,
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
false);
}
@ -2480,8 +2482,8 @@ static void dcn10_optimize_bandwidth(
if (context->stream_count == 0)
context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
dc->res_pool->clk_mgr->funcs->update_clocks(
dc->res_pool->clk_mgr,
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
true);
}
@ -2504,8 +2506,8 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
{
int i = 0;
struct drr_params params = {0};
// DRR should set trigger event to monitor surface update event
unsigned int event_triggers = 0x80;
// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
unsigned int event_triggers = 0x800;
params.vertical_total_max = vmax;
params.vertical_total_min = vmin;

View File

@ -43,7 +43,7 @@
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
#include "dcn10_clk_mgr.h"
#include "clk_mgr.h"
static unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
{

View File

@ -1359,5 +1359,5 @@ void dcn10_aux_initialize(struct dcn10_link_encoder *enc10)
/* 1/4 window (the maximum allowed) */
AUX_REG_UPDATE(AUX_DPHY_RX_CONTROL0,
AUX_RX_RECEIVE_WINDOW, 1);
AUX_RX_RECEIVE_WINDOW, 0);
}

View File

@ -791,6 +791,32 @@ void optc1_set_static_screen_control(
OTG_STATIC_SCREEN_FRAME_COUNT, 2);
}
void optc1_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_GLOBAL_CONTROL2, 0,
MANUAL_FLOW_CONTROL_SEL, optc->inst);
REG_SET_8(OTG_TRIGA_CNTL, 0,
OTG_TRIGA_SOURCE_SELECT, 22,
OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
OTG_TRIGA_POLARITY_SELECT, 0,
OTG_TRIGA_FREQUENCY_SELECT, 0,
OTG_TRIGA_DELAY, 0,
OTG_TRIGA_CLEAR, 1);
}
void optc1_program_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_MANUAL_FLOW_CONTROL, 0,
MANUAL_FLOW_CONTROL, 1);
}
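
Taken together with the 0x800 event mask in set_drr (bit 11, OTG_TRIG_A per the comment there), the intended flow is: arm TRIG_A once when DRR is enabled, then pulse the manual trigger per flip. A hedged sketch with an invented caller:

static void drr_flip_sketch(struct timing_generator *tg,
			    const struct drr_params *params)
{
	/* arming: set_drr() now calls setup_manual_trigger(), which
	 * routes TRIG_A and selects this OTG for manual flow control
	 */
	tg->funcs->set_drr(tg, params);

	/* per flip: pulse MANUAL_FLOW_CONTROL so the OTG is released
	 * at end of frame
	 */
	tg->funcs->program_manual_trigger(tg);
}
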
/**
*****************************************************************************
@ -823,6 +849,10 @@ void optc1_set_drr(
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
OTG_SET_V_TOTAL_MIN_MASK, 0);
// Setup manual flow control for EOF via TRIG_A
optc->funcs->setup_manual_trigger(optc);
} else {
REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
OTG_SET_V_TOTAL_MIN_MASK, 0,
@ -1458,6 +1488,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.get_crc = optc1_get_crc,
.configure_crc = optc1_configure_crc,
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc1_program_manual_trigger,
.setup_manual_trigger = optc1_setup_manual_trigger
};
void dcn10_timing_generator_init(struct optc *optc1)

View File

@ -84,13 +84,18 @@
SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\
SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst)
SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\
SR(GSL_SOURCE_SELECT),\
SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst)
#define TG_COMMON_REG_LIST_DCN1_0(inst) \
TG_COMMON_REG_LIST_DCN(inst),\
SRI(OTG_TEST_PATTERN_PARAMETERS, OTG, inst),\
SRI(OTG_TEST_PATTERN_CONTROL, OTG, inst),\
SRI(OTG_TEST_PATTERN_COLOR, OTG, inst)
SRI(OTG_TEST_PATTERN_COLOR, OTG, inst),\
SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst)
struct dcn_optc_registers {
@ -124,6 +129,8 @@ struct dcn_optc_registers {
uint32_t OTG_V_TOTAL_MIN;
uint32_t OTG_V_TOTAL_CONTROL;
uint32_t OTG_TRIGA_CNTL;
uint32_t OTG_TRIGA_MANUAL_TRIG;
uint32_t OTG_MANUAL_FLOW_CONTROL;
uint32_t OTG_FORCE_COUNT_NOW_CNTL;
uint32_t OTG_STATIC_SCREEN_CONTROL;
uint32_t OTG_STATUS_FRAME_COUNT;
@ -156,6 +163,7 @@ struct dcn_optc_registers {
uint32_t OTG_CRC0_WINDOWA_Y_CONTROL;
uint32_t OTG_CRC0_WINDOWB_X_CONTROL;
uint32_t OTG_CRC0_WINDOWB_Y_CONTROL;
uint32_t GSL_SOURCE_SELECT;
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@ -213,6 +221,11 @@ struct dcn_optc_registers {
SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\
SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\
SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\
SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_POLARITY_SELECT, mask_sh),\
SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FREQUENCY_SELECT, mask_sh),\
SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_DELAY, mask_sh),\
SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_CLEAR, mask_sh),\
SF(OTG0_OTG_TRIGA_MANUAL_TRIG, OTG_TRIGA_MANUAL_TRIG, mask_sh),\
SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\
SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\
SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\
@ -266,8 +279,11 @@ struct dcn_optc_registers {
SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\
SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\
SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\
SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh)
SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\
SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\
SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\
SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\
SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh)
#define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\
@ -282,7 +298,8 @@ struct dcn_optc_registers {
SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_COLOR_FORMAT, mask_sh),\
SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_MASK, mask_sh),\
SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_DATA, mask_sh),\
SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SRC_SEL, mask_sh)
SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SRC_SEL, mask_sh),\
SF(OTG0_OTG_MANUAL_FLOW_CONTROL, MANUAL_FLOW_CONTROL, mask_sh),\
#define TG_REG_FIELD_LIST_DCN1_0(type) \
type VSTARTUP_START;\
@ -338,6 +355,11 @@ struct dcn_optc_registers {
type OTG_TRIGA_SOURCE_PIPE_SELECT;\
type OTG_TRIGA_RISING_EDGE_DETECT_CNTL;\
type OTG_TRIGA_FALLING_EDGE_DETECT_CNTL;\
type OTG_TRIGA_POLARITY_SELECT;\
type OTG_TRIGA_FREQUENCY_SELECT;\
type OTG_TRIGA_DELAY;\
type OTG_TRIGA_CLEAR;\
type OTG_TRIGA_MANUAL_TRIG;\
type OTG_STATIC_SCREEN_EVENT_MASK;\
type OTG_STATIC_SCREEN_FRAME_COUNT;\
type OTG_FRAME_COUNT;\
@ -413,7 +435,12 @@ struct dcn_optc_registers {
type OTG_CRC0_WINDOWB_X_START;\
type OTG_CRC0_WINDOWB_X_END;\
type OTG_CRC0_WINDOWB_Y_START;\
type OTG_CRC0_WINDOWB_Y_END;
type OTG_CRC0_WINDOWB_Y_END;\
type GSL0_READY_SOURCE_SEL;\
type GSL1_READY_SOURCE_SEL;\
type GSL2_READY_SOURCE_SEL;\
type MANUAL_FLOW_CONTROL;\
type MANUAL_FLOW_CONTROL_SEL;
#define TG_REG_FIELD_LIST(type) \

View File

@ -39,7 +39,6 @@
#include "dcn10_opp.h"
#include "dcn10_link_encoder.h"
#include "dcn10_stream_encoder.h"
#include "dcn10_clk_mgr.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
@ -199,6 +198,7 @@ enum dcn10_clk_src_array_id {
#define MMHUB_SR(reg_name)\
.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
/* macros to expend register list macro defined in HW object header file
* end *********************/
@ -489,27 +489,6 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};
#define mmMP1_SMN_C2PMSG_91 0x1629B
#define mmMP1_SMN_C2PMSG_83 0x16293
#define mmMP1_SMN_C2PMSG_67 0x16283
#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xffffffffL
#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xffffffffL
#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xffffffffL
#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x00000000
#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x00000000
#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x00000000
static const struct clk_mgr_shift clk_mgr_shift = {
CLK_MASK_SH_LIST_RV1(__SHIFT)
};
static const struct clk_mgr_mask clk_mgr_mask = {
CLK_MASK_SH_LIST_RV1(_MASK)
};
static const struct resource_caps res_cap = {
.num_timing_generator = 4,
.num_opp = 4,
@ -980,9 +959,6 @@ static void destruct(struct dcn10_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
if (pool->base.clk_mgr != NULL)
dce_clk_mgr_destroy(&pool->base.clk_mgr);
kfree(pool->base.pp_smu);
}
@ -1436,13 +1412,6 @@ static bool construct(
pool->base.pp_smu = dcn10_pp_smu_create(ctx);
pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto fail;
}
if (!dc->debug.disable_pplib_clock_request)
dcn_bw_update_from_pplib(dc);
dcn_bw_sync_calcs_and_dml(dc);

View File

@ -472,7 +472,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom);
}
static void enc1_stream_encoder_set_stream_attribute_helper(
void enc1_stream_encoder_set_stream_attribute_helper(
struct dcn10_stream_encoder *enc1,
struct dc_crtc_timing *crtc_timing)
{
@ -1091,19 +1091,6 @@ union audio_cea_channels {
} channels;
};
struct audio_clock_info {
/* pixel clock frequency*/
uint32_t pixel_clock_in_10khz;
/* N - 32KHz audio */
uint32_t n_32khz;
/* CTS - 32KHz audio*/
uint32_t cts_32khz;
uint32_t n_44khz;
uint32_t cts_44khz;
uint32_t n_48khz;
uint32_t cts_48khz;
};
/* 25.2MHz/1.001*/
/* 25.2MHz/1.001*/
/* 25.2MHz*/
@ -1206,7 +1193,7 @@ static union audio_cea_channels speakers_to_channels(
return cea_channels;
}
static void get_audio_clock_info(
void get_audio_clock_info(
enum dc_color_depth color_depth,
uint32_t crtc_pixel_clock_in_khz,
uint32_t actual_pixel_clock_in_khz,
@ -1410,7 +1397,7 @@ static void enc1_se_setup_dp_audio(
REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0);
}
static void enc1_se_enable_audio_clock(
void enc1_se_enable_audio_clock(
struct stream_encoder *enc,
bool enable)
{
@ -1432,7 +1419,7 @@ static void enc1_se_enable_audio_clock(
*/
}
static void enc1_se_enable_dp_audio(
void enc1_se_enable_dp_audio(
struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);

View File

@ -552,4 +552,21 @@ void enc1_dig_connect_to_otg(
struct stream_encoder *enc,
int tg_inst);
void enc1_stream_encoder_set_stream_attribute_helper(
struct dcn10_stream_encoder *enc1,
struct dc_crtc_timing *crtc_timing);
void enc1_se_enable_audio_clock(
struct stream_encoder *enc,
bool enable);
void enc1_se_enable_dp_audio(
struct stream_encoder *enc);
void get_audio_clock_info(
enum dc_color_depth color_depth,
uint32_t crtc_pixel_clock_in_khz,
uint32_t actual_pixel_clock_in_khz,
struct audio_clock_info *audio_clock_info);
#endif /* __DC_STREAM_ENCODER_DCN10_H__ */

View File

@ -41,6 +41,7 @@ enum pp_smu_ver {
*/
PP_SMU_UNSUPPORTED,
PP_SMU_VER_RV,
PP_SMU_VER_MAX
};
@ -56,12 +57,31 @@ struct pp_smu {
const void *dm;
};
enum pp_smu_status {
PP_SMU_RESULT_UNDEFINED = 0,
PP_SMU_RESULT_OK = 1,
PP_SMU_RESULT_FAIL,
PP_SMU_RESULT_UNSUPPORTED
};
#define PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN 0x0
#define PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX 0xFFFF
enum wm_type {
WM_TYPE_PSTATE_CHG = 0,
WM_TYPE_RETRAINING = 1,
};
/* This structure is a copy of WatermarkRowGeneric_t defined by smuxx_driver_if.h*/
struct pp_smu_wm_set_range {
unsigned int wm_inst;
uint32_t min_fill_clk_mhz;
uint32_t max_fill_clk_mhz;
uint32_t min_drain_clk_mhz;
uint32_t max_drain_clk_mhz;
uint16_t min_fill_clk_mhz;
uint16_t max_fill_clk_mhz;
uint16_t min_drain_clk_mhz;
uint16_t max_drain_clk_mhz;
uint8_t wm_inst;
uint8_t wm_type;
};
#define MAX_WATERMARK_SETS 4
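
For reference, a hypothetical watermark set using the new fixed-width layout; the clock bounds are invented, not taken from any SMU table:

static const struct pp_smu_wm_set_range example_range = {
	.min_fill_clk_mhz  = 400,	/* fclk floor */
	.max_fill_clk_mhz  = 1200,	/* fclk ceiling */
	.min_drain_clk_mhz = 300,	/* dcfclk floor */
	.max_drain_clk_mhz = 800,	/* dcfclk ceiling */
	.wm_inst           = 0,		/* set A */
	.wm_type           = WM_TYPE_PSTATE_CHG,
};
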
@ -122,6 +142,7 @@ struct pp_smu_funcs {
struct pp_smu ctx;
union {
struct pp_smu_funcs_rv rv_funcs;
};
};

View File

@ -147,4 +147,10 @@ enum dm_validation_status {
DML_FAIL_V_RATIO_PREFETCH,
};
enum writeback_config {
dm_normal,
dm_whole_buffer_for_single_stream_no_interleave,
dm_whole_buffer_for_single_stream_interleave,
};
#endif

View File

@ -82,7 +82,6 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option);
void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
/********** DAL Core*********************/
#include "hw/clk_mgr.h"
#include "transform.h"
#include "dpp.h"
@ -178,7 +177,6 @@ struct resource_pool {
unsigned int audio_count;
struct audio_support audio_support;
struct clk_mgr *clk_mgr;
struct dccg *dccg;
struct irq_service *irqs;

View File

@ -32,7 +32,7 @@
#include "bw_fixed.h"
#include "../dml/display_mode_lib.h"
#include "hw/clk_mgr.h"
struct dc;
struct dc_state;

View File

@ -37,7 +37,7 @@ struct abm_backlight_registers {
struct abm {
struct dc_context *ctx;
const struct abm_funcs *funcs;
bool dmcu_is_running;
/* register settings need to be saved and restored at InitBacklight */
struct abm_backlight_registers stored_backlight_registers;
};

View File

@ -57,6 +57,7 @@ struct audio {
const struct audio_funcs *funcs;
struct dc_context *ctx;
unsigned int inst;
bool enabled;
};
#endif /* __DAL_AUDIO__ */

View File

@ -26,17 +26,22 @@
#ifndef __DAL_CLK_MGR_H__
#define __DAL_CLK_MGR_H__
#include "dm_services_types.h"
#include "dc.h"
struct clk_mgr {
struct dc_context *ctx;
struct clk_mgr_funcs *funcs;
/* Public interfaces */
struct dc_clocks clks;
struct clk_states {
uint32_t dprefclk_khz;
};
struct clk_mgr_funcs {
/*
* This function should set new clocks based on the input "safe_to_lower".
* If safe_to_lower == false, then only clocks which are to be increased
* should be changed.
* If safe_to_lower == true, then only clocks which are to be decreased
* should be changed.
*/
void (*update_clocks)(struct clk_mgr *clk_mgr,
struct dc_state *context,
bool safe_to_lower);
@ -45,11 +50,23 @@ struct clk_mgr_funcs {
void (*init_clocks)(struct clk_mgr *clk_mgr);
/* Returns actual clk that's set */
int (*set_dispclk)(struct clk_mgr *clk_mgr, int requested_dispclk_khz);
int (*set_dprefclk)(struct clk_mgr *clk_mgr);
void (*enable_pme_wa) (struct clk_mgr *clk_mgr);
};
void dce121_clock_patch_xgmi_ss_info(struct clk_mgr *clk_mgr_base);
struct clk_mgr {
struct dc_context *ctx;
struct clk_mgr_funcs *funcs;
struct dc_clocks clks;
int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figure out where this goes
};
/* forward declarations */
struct dccg;
struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg);
void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr);
#endif /* __DAL_CLK_MGR_H__ */
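
The safe_to_lower contract above is what the prepare_bandwidth/optimize_bandwidth pairs earlier in this series implement; a sketch of the resulting two-phase pattern, with program_pipes() as a hypothetical stand-in for the real programming sequence:

static void program_pipes(struct dc *dc, struct dc_state *context); /* stand-in */

static void apply_state_sketch(struct dc *dc, struct dc_state *context)
{
	/* phase 1: only increases may be applied */
	dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, context, false);

	program_pipes(dc, context);

	/* phase 2: the new state is live, decreases are now safe */
	dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, context, true);
}
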

View File

@ -1,5 +1,5 @@
/*
* Copyright 2012-16 Advanced Micro Devices, Inc.
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -23,14 +23,40 @@
*
*/
#ifndef _DCE_CLK_MGR_H_
#define _DCE_CLK_MGR_H_
#ifndef __DAL_CLK_MGR_INTERNAL_H__
#define __DAL_CLK_MGR_INTERNAL_H__
#include "clk_mgr.h"
#include "dccg.h"
#define MEMORY_TYPE_MULTIPLIER_CZ 4
/*
* only thing needed from here is MEMORY_TYPE_MULTIPLIER_CZ, which is also
* used in resource, perhaps this should be defined somewhere more common.
*/
#include "resource.h"
/*
***************************************************************************************
****************** Clock Manager Private Macros and Defines ***************************
***************************************************************************************
*/
#define TO_CLK_MGR_INTERNAL(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_internal, base)
#define CTX \
clk_mgr->base.ctx
#define DC_LOGGER \
clk_mgr->ctx->logger
#define CLK_BASE(inst) \
CLK_BASE_INNER(inst)
#define CLK_SRI(reg_name, block, inst)\
.reg_name = CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## _ ## inst ## _ ## reg_name
#define CLK_COMMON_REG_LIST_DCE_BASE() \
.DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
@ -39,11 +65,6 @@
#define CLK_COMMON_REG_LIST_DCN_BASE() \
SR(DENTIST_DISPCLK_CNTL)
#define VBIOS_SMU_MSG_BOX_REG_LIST_RV() \
.MP1_SMN_C2PMSG_91 = mmMP1_SMN_C2PMSG_91, \
.MP1_SMN_C2PMSG_83 = mmMP1_SMN_C2PMSG_83, \
.MP1_SMN_C2PMSG_67 = mmMP1_SMN_C2PMSG_67
#define CLK_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@ -68,51 +89,62 @@
type DENTIST_DISPCLK_WDIVIDER; \
type DENTIST_DISPCLK_CHG_DONE;
#define VBIOS_SMU_REG_FIELD_LIST(type) \
type CONTENT;
struct clk_mgr_shift {
CLK_REG_FIELD_LIST(uint8_t)
VBIOS_SMU_REG_FIELD_LIST(uint32_t)
};
struct clk_mgr_mask {
CLK_REG_FIELD_LIST(uint32_t)
VBIOS_SMU_REG_FIELD_LIST(uint32_t)
};
/*
***************************************************************************************
****************** Clock Manager Private Structures ***********************************
***************************************************************************************
*/
struct clk_mgr_registers {
uint32_t DPREFCLK_CNTL;
uint32_t DENTIST_DISPCLK_CNTL;
uint32_t MP1_SMN_C2PMSG_67;
uint32_t MP1_SMN_C2PMSG_83;
uint32_t MP1_SMN_C2PMSG_91;
};
struct clk_mgr_shift {
CLK_REG_FIELD_LIST(uint8_t)
};
struct clk_mgr_mask {
CLK_REG_FIELD_LIST(uint32_t)
};
struct state_dependent_clocks {
int display_clk_khz;
int pixel_clk_khz;
};
struct dce_clk_mgr {
struct clk_mgr_internal {
struct clk_mgr base;
struct pp_smu_funcs *pp_smu;
struct clk_mgr_internal_funcs *funcs;
struct dccg *dccg;
/*
* For backwards compatibility with previous implementation
* TODO: remove these after everything transitions to new pattern
* Rationale is that clk registers change a lot across DCE versions
* and a shared data structure doesn't really make sense.
*/
const struct clk_mgr_registers *regs;
const struct clk_mgr_shift *clk_mgr_shift;
const struct clk_mgr_mask *clk_mgr_mask;
struct dccg *dccg;
struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
/*TODO: figure out which of the below fields should be here vs in asic specific portion */
int dentist_vco_freq_khz;
/* Cache the status of DFS-bypass feature*/
bool dfs_bypass_enabled;
/* True if the DFS-bypass feature is enabled and active. */
bool dfs_bypass_active;
/* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
* This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
/*
* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
* This is basically "Crystal Frequency In KHz" (XTALIN) frequency
*/
int dfs_bypass_disp_clk;
/**
@ -147,76 +179,33 @@ struct dce_clk_mgr {
* DPREFCLK SS percentage Divider (100 or 1000).
*/
int dprefclk_ss_divider;
int dprefclk_khz;
enum dm_pp_clocks_state max_clks_state;
enum dm_pp_clocks_state cur_min_clks_state;
};
/* Starting DID for each range */
enum dentist_base_divider_id {
DENTIST_BASE_DID_1 = 0x08,
DENTIST_BASE_DID_2 = 0x40,
DENTIST_BASE_DID_3 = 0x60,
DENTIST_BASE_DID_4 = 0x7e,
DENTIST_MAX_DID = 0x7f
struct clk_mgr_internal_funcs {
int (*set_dispclk)(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
int (*set_dprefclk)(struct clk_mgr_internal *clk_mgr);
};
/* Starting point and step size for each divider range.*/
enum dentist_divider_range {
DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */
DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
};
/*
***************************************************************************************
****************** Clock Manager Level Helper functions *******************************
***************************************************************************************
*/
static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
}
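
A worked truth sketch of the helper above, around cur_clk = 600000 kHz:

/*
 *   calc_clk  safe_to_lower=false  safe_to_lower=true
 *   700000    true  (raise now)    true
 *   500000    false (hold)         true  (drop deferred until safe)
 *   600000    false                false (equal never reprograms, hence
 *                                         the dispclk == w/a in dcn1)
 */
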
void dce_clock_read_ss_info(struct dce_clk_mgr *dccg_dce);
int clk_mgr_helper_get_active_display_cnt(
struct dc *dc,
struct dc_state *context);
int dce12_get_dp_ref_freq_khz(struct clk_mgr *dccg);
void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg);
int dce112_set_clock(struct clk_mgr *dccg, int requested_clk_khz);
int dce112_set_dispclk(struct clk_mgr *clk_mgr, int requested_clk_khz);
int dce112_set_dprefclk(struct clk_mgr *clk_mgr);
struct clk_mgr *dce_clk_mgr_create(
struct dc_context *ctx,
const struct clk_mgr_registers *regs,
const struct clk_mgr_shift *clk_shift,
const struct clk_mgr_mask *clk_mask);
struct clk_mgr *dce110_clk_mgr_create(
struct dc_context *ctx,
const struct clk_mgr_registers *regs,
const struct clk_mgr_shift *clk_shift,
const struct clk_mgr_mask *clk_mask);
struct clk_mgr *dce112_clk_mgr_create(
struct dc_context *ctx,
const struct clk_mgr_registers *regs,
const struct clk_mgr_shift *clk_shift,
const struct clk_mgr_mask *clk_mask);
struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx);
struct clk_mgr *dce121_clk_mgr_create(struct dc_context *ctx);
void dce121_clock_patch_xgmi_ss_info(struct clk_mgr *clk_mgr);
void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr);
int dentist_get_divider_from_did(int did);
#endif /* _DCE_CLK_MGR_H_ */
#endif //__DAL_CLK_MGR_INTERNAL_H__

View File

@ -64,7 +64,22 @@ struct dcn_dpp_state {
uint32_t gamut_remap_c33_c34;
};
struct CM_bias_params {
uint32_t cm_bias_cr_r;
uint32_t cm_bias_y_g;
uint32_t cm_bias_cb_b;
uint32_t cm_bias_format;
};
struct dpp_funcs {
void (*dpp_program_cm_dealpha)(struct dpp *dpp_base,
uint32_t enable, uint32_t additive_blending);
void (*dpp_program_cm_bias)(
struct dpp *dpp_base,
struct CM_bias_params *bias_params);
void (*dpp_read_state)(struct dpp *dpp, struct dcn_dpp_state *s);
void (*dpp_reset)(struct dpp *dpp);
@ -155,9 +170,11 @@ struct dpp_funcs {
uint32_t width,
uint32_t height
);
void (*dpp_set_hdr_multiplier)(
struct dpp *dpp_base,
uint32_t multiplier);
void (*set_optional_cursor_attributes)(
struct dpp *dpp_base,
struct dpp_cursor_attributes *attr);

View File

@ -52,6 +52,19 @@ enum dp_component_depth {
DP_COMPONENT_PIXEL_DEPTH_16BPC = 0x00000004
};
struct audio_clock_info {
/* pixel clock frequency*/
uint32_t pixel_clock_in_10khz;
/* N - 32KHz audio */
uint32_t n_32khz;
/* CTS - 32KHz audio*/
uint32_t cts_32khz;
uint32_t n_44khz;
uint32_t cts_44khz;
uint32_t n_48khz;
uint32_t cts_48khz;
};
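
The pairs stored in this struct satisfy the HDMI audio clock regeneration relation 128 * fs = f_pixel * N / CTS. A worked check for the 25.2 MHz / 1.001 row at 32 kHz, using the spec-table values N = 4576 and CTS = 28125 (assumed here, not part of this diff):

/* (25200000 / 1.001) * 4576 / 28125 = 4096000 = 128 * 32000 -- checks out */
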
struct encoder_info_frame {
/* auxiliary video information */
struct dc_info_packet avi;

View File

@ -238,6 +238,9 @@ struct timing_generator_funcs {
bool (*get_crc)(struct timing_generator *tg,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
void (*program_manual_trigger)(struct timing_generator *optc);
void (*setup_manual_trigger)(struct timing_generator *optc);
void (*set_vtg_params)(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
};

View File

@ -30,6 +30,8 @@
#include "dal_asic_id.h"
#include "dm_pp_smu.h"
#define MEMORY_TYPE_MULTIPLIER_CZ 4
enum dce_version resource_parse_asic_id(
struct hw_asic_id asic_id);

View File

@ -131,19 +131,20 @@
#define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */
#define RAVEN_A0 0x01
#define RAVEN_B0 0x21
/* DCN1_01 */
#define PICASSO_A0 0x41
/* DCN1_01 */
#define RAVEN2_A0 0x81
#define RAVEN1_F0 0xF0
#define RAVEN_UNKNOWN 0xFF
#define ASIC_REV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
#define RAVEN1_F0 0xF0
#define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
#define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
#define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0))
#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < 0xF0))
#define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
#define FAMILY_RV 142 /* DCN 1*/
/*

View File

@ -1569,13 +1569,15 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
output_tf->tf == TRANSFER_FUNCTION_SRGB) {
if (ramp == NULL)
return true;
if (ramp->is_identity || (!mapUserRamp && ramp->type == GAMMA_RGB_256))
if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) ||
(!mapUserRamp && ramp->type == GAMMA_RGB_256))
return true;
}
output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
if (ramp && (mapUserRamp || ramp->type != GAMMA_RGB_256)) {
if (ramp && ramp->type != GAMMA_CS_TFM_1D &&
(mapUserRamp || ramp->type != GAMMA_RGB_256)) {
rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
sizeof(*rgb_user),
GFP_KERNEL);

View File

@ -256,7 +256,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
return 0;
ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
!no_intr, NULL);
!no_intr, NULL, true);
if (ret)
return ret;

View File

@ -559,7 +559,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
if (!vm_bos)
return;
r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
if (r)
goto error_free;

View File

@ -539,7 +539,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
INIT_LIST_HEAD(&duplicates);
r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
if (unlikely(r != 0)) {
return r;
}

View File

@ -166,27 +166,34 @@ static void ttm_bo_release_list(struct kref *list_kref)
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
reservation_object_assert_held(bo->resv);
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
BUG_ON(!list_empty(&bo->lru));
if (!list_empty(&bo->lru))
return;
man = &bdev->man[bo->mem.mem_type];
if (mem->placement & TTM_PL_FLAG_NO_EVICT)
return;
man = &bdev->man[mem->mem_type];
list_add_tail(&bo->lru, &man->lru[bo->priority]);
kref_get(&bo->list_kref);
if (bo->ttm && !(bo->ttm->page_flags &
(TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
list_add_tail(&bo->swap,
&bdev->glob->swap_lru[bo->priority]);
list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
kref_get(&bo->list_kref);
}
}
}
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
ttm_bo_add_mem_to_lru(bo, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);
@ -766,32 +773,72 @@ EXPORT_SYMBOL(ttm_bo_eviction_valuable);
* b. Otherwise, trylock it.
*/
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx, bool *locked)
struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
{
bool ret = false;
*locked = false;
if (bo->resv == ctx->resv) {
reservation_object_assert_held(bo->resv);
if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
|| !list_empty(&bo->ddestroy))
ret = true;
*locked = false;
if (busy)
*busy = false;
} else {
*locked = reservation_object_trylock(bo->resv);
ret = *locked;
ret = reservation_object_trylock(bo->resv);
*locked = ret;
if (busy)
*busy = !ret;
}
return ret;
}
/**
* ttm_mem_evict_wait_busy - wait for a busy BO to become available
*
* @busy_bo: BO which couldn't be locked with trylock
* @ctx: operation context
* @ticket: acquire ticket
*
* Try to lock a busy buffer object to avoid failing eviction.
*/
static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
struct ttm_operation_ctx *ctx,
struct ww_acquire_ctx *ticket)
{
int r;
if (!busy_bo || !ticket)
return -EBUSY;
if (ctx->interruptible)
r = reservation_object_lock_interruptible(busy_bo->resv,
ticket);
else
r = reservation_object_lock(busy_bo->resv, ticket);
/*
* TODO: It would be better to keep the BO locked until allocation is at
* least tried one more time, but that would mean a much larger rework
* of TTM.
*/
if (!r)
reservation_object_unlock(busy_bo->resv);
return r == -EDEADLK ? -EAGAIN : r;
}
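
A hedged sketch of the semantics this enables in an allocation path; the wrapper name is illustrative, not from this patch:

static int evict_one_sketch(struct ttm_bo_device *bdev, uint32_t mem_type,
			    const struct ttm_place *place,
			    struct ttm_operation_ctx *ctx,
			    struct ww_acquire_ctx *ticket)
{
	int ret = ttm_mem_evict_first(bdev, mem_type, place, ctx, ticket);

	/* -EAGAIN means the only candidates were busy under another
	 * ticket and waiting on one hit -EDEADLK: the caller should
	 * back off all its reservations and restart the acquire.
	 * -EBUSY still means nothing evictable and no ticket to wait on.
	 */
	return ret;
}
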
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
const struct ttm_place *place,
struct ttm_operation_ctx *ctx)
struct ttm_operation_ctx *ctx,
struct ww_acquire_ctx *ticket)
{
struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_buffer_object *bo = NULL;
bool locked = false;
unsigned i;
int ret;
@ -799,8 +846,15 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked))
bool busy;
if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
&busy)) {
if (busy && !busy_bo &&
bo->resv->lock.ctx != ticket)
busy_bo = bo;
continue;
}
if (place && !bdev->driver->eviction_valuable(bo,
place)) {
@ -819,8 +873,13 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
}
if (!bo) {
if (busy_bo)
ttm_bo_get(busy_bo);
spin_unlock(&glob->lru_lock);
return -EBUSY;
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
ttm_bo_put(busy_bo);
return ret;
}
kref_get(&bo->list_kref);
@ -892,13 +951,12 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
* space, or we've evicted everything and there isn't enough space.
*/
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
uint32_t mem_type,
const struct ttm_place *place,
struct ttm_mem_reg *mem,
struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret;
do {
@ -907,11 +965,12 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
if (mem->mm_node)
break;
ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
bo->resv->lock.ctx);
if (unlikely(ret != 0))
return ret;
} while (1);
mem->mem_type = mem_type;
return ttm_bo_add_move_fence(bo, man, mem);
}
@ -959,6 +1018,59 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
return true;
}
/**
* ttm_bo_mem_placement - check if placement is compatible
* @bo: BO to find memory for
* @place: where to search
* @mem: the memory object to fill in
* @ctx: operation context
*
* Check if placement is compatible and fill in mem structure.
* Returns -EBUSY if the placement won't work, another negative error
* code on failure, or 0 when the placement can be used.
*/
static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_mem_reg *mem,
struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
uint32_t mem_type = TTM_PL_SYSTEM;
struct ttm_mem_type_manager *man;
uint32_t cur_flags = 0;
int ret;
ret = ttm_mem_type_from_place(place, &mem_type);
if (ret)
return ret;
man = &bdev->man[mem_type];
if (!man->has_type || !man->use_type)
return -EBUSY;
if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
return -EBUSY;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags);
/*
* Merge the access and other non-mapping-related flag bits from
* the memory placement flags into the current flags
*/
ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE);
mem->mem_type = mem_type;
mem->placement = cur_flags;
if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
spin_lock(&bo->bdev->glob->lru_lock);
ttm_bo_del_from_lru(bo);
ttm_bo_add_mem_to_lru(bo, mem);
spin_unlock(&bo->bdev->glob->lru_lock);
}
return 0;
}
/**
* Creates space for memory region @mem according to its type.
*
@ -973,12 +1085,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
uint32_t mem_type = TTM_PL_SYSTEM;
uint32_t cur_flags = 0;
bool type_found = false;
bool type_ok = false;
bool has_erestartsys = false;
int i, ret;
ret = reservation_object_reserve_shared(bo->resv, 1);
@ -988,97 +1095,70 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
mem->mm_node = NULL;
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
struct ttm_mem_type_manager *man;
ret = ttm_mem_type_from_place(place, &mem_type);
ret = ttm_bo_mem_placement(bo, place, mem, ctx);
if (ret == -EBUSY)
continue;
if (ret)
return ret;
man = &bdev->man[mem_type];
if (!man->has_type || !man->use_type)
continue;
type_ok = ttm_bo_mt_compatible(man, mem_type, place,
&cur_flags);
if (!type_ok)
continue;
goto error;
type_found = true;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
/*
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
ttm_flag_masked(&cur_flags, place->flags,
~TTM_PL_MASK_MEMTYPE);
if (mem_type == TTM_PL_SYSTEM)
break;
mem->mm_node = NULL;
if (mem->mem_type == TTM_PL_SYSTEM)
return 0;
man = &bdev->man[mem->mem_type];
ret = (*man->func->get_node)(man, bo, place, mem);
if (unlikely(ret))
return ret;
goto error;
if (mem->mm_node) {
ret = ttm_bo_add_move_fence(bo, man, mem);
if (unlikely(ret)) {
(*man->func->put_node)(man, mem);
return ret;
goto error;
}
break;
}
}
if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
mem->mem_type = mem_type;
mem->placement = cur_flags;
return 0;
}
}
for (i = 0; i < placement->num_busy_placement; ++i) {
const struct ttm_place *place = &placement->busy_placement[i];
ret = ttm_mem_type_from_place(place, &mem_type);
ret = ttm_bo_mem_placement(bo, place, mem, ctx);
if (ret == -EBUSY)
continue;
if (ret)
return ret;
man = &bdev->man[mem_type];
if (!man->has_type || !man->use_type)
continue;
if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
continue;
goto error;
type_found = true;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
/*
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
ttm_flag_masked(&cur_flags, place->flags,
~TTM_PL_MASK_MEMTYPE);
if (mem_type == TTM_PL_SYSTEM) {
mem->mem_type = mem_type;
mem->placement = cur_flags;
mem->mm_node = NULL;
if (mem->mem_type == TTM_PL_SYSTEM)
return 0;
ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
if (ret == 0 && mem->mm_node)
return 0;
if (ret && ret != -EBUSY)
goto error;
}
ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ctx);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
return 0;
}
if (ret == -ERESTARTSYS)
has_erestartsys = true;
}
ret = -ENOMEM;
if (!type_found) {
pr_err(TTM_PFX "No compatible memory type found\n");
return -EINVAL;
ret = -EINVAL;
}
return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
error:
if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
spin_lock(&bo->bdev->glob->lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&bo->bdev->glob->lru_lock);
}
return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
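
A hedged sketch of the caller-visible contract after this rework. The ttm_mem_reg setup follows the pattern of TTM's internal move path; the wrapper name is illustrative only:

	static int example_find_space(struct ttm_buffer_object *bo,
				      struct ttm_placement *placement,
				      struct ttm_operation_ctx *ctx)
	{
		struct ttm_mem_reg mem;
		int ret;

		mem.num_pages = bo->num_pages;
		mem.size = mem.num_pages << PAGE_SHIFT;
		mem.page_alignment = bo->mem.page_alignment;

		ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
		/* On failure the error path above may have parked the BO
		 * at the LRU tail, so concurrent evictors can make
		 * forward progress before any retry. */
		return ret;
	}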
@ -1401,7 +1481,8 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
while (!list_empty(&man->lru[i])) {
spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx,
NULL);
if (ret)
return ret;
spin_lock(&glob->lru_lock);
@ -1772,7 +1853,8 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &glob->swap_lru[i], swap) {
if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked)) {
if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
NULL)) {
ret = 0;
break;
}

View File

@ -69,6 +69,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
if (list_empty(&bo->lru))
ttm_bo_add_to_lru(bo);
reservation_object_unlock(bo->resv);
}
@ -93,7 +94,7 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
struct list_head *dups)
struct list_head *dups, bool del_lru)
{
struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
@ -172,11 +173,11 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
list_add(&entry->head, list);
}
if (ticket)
ww_acquire_done(ticket);
if (del_lru) {
spin_lock(&glob->lru_lock);
ttm_eu_del_from_lru_locked(list);
spin_unlock(&glob->lru_lock);
}
return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
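
A minimal usage sketch of the widened signature; list setup is elided, and passing true for the new del_lru argument keeps the pre-change behaviour of pulling reserved BOs off the LRU:

	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	int ret;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	/* ... fill &list with ttm_validate_buffer entries ... */

	ret = ttm_eu_reserve_buffers(&ticket, &list, true /* intr */,
				     &duplicates, true /* del_lru */);
	if (ret)
		return ret;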
@ -203,7 +204,10 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
reservation_object_add_shared_fence(bo->resv, fence);
else
reservation_object_add_excl_fence(bo->resv, fence);
if (list_empty(&bo->lru))
ttm_bo_add_to_lru(bo);
else
ttm_bo_move_to_lru_tail(bo, NULL);
reservation_object_unlock(bo->resv);
}
spin_unlock(&glob->lru_lock);
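
Putting both halves together, a hedged sketch of the reserve/submit/fence round trip, continuing the ticket and list from the previous sketch; fence creation is driver-specific and elided:

	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
	if (ret)
		return ret;

	/* ... queue work that uses the reserved BOs, obtain fence ... */

	/* Fencing re-adds each BO to the LRU (or bumps it to the
	 * tail), so the eviction order stays age-sorted. */
	ttm_eu_fence_buffer_objects(&ticket, &list, fence);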

View File

@ -63,7 +63,7 @@ int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
struct virtio_gpu_object *qobj;
int ret;
ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
if (ret != 0)
return ret;

View File

@ -464,7 +464,8 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
val_buf->bo = &res->backup->base;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
true);
if (unlikely(ret != 0))
goto out_no_reserve;

View File

@ -169,7 +169,7 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
bool intr)
{
return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
NULL);
NULL, true);
}
/**

View File

@ -767,11 +767,12 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
*/
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
spin_lock(&bo->bdev->glob->lru_lock);
if (list_empty(&bo->lru))
ttm_bo_add_to_lru(bo);
else
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&bo->bdev->glob->lru_lock);
}
reservation_object_unlock(bo->resv);
}
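
The same applies to the single-BO path; a minimal sketch with no ticket and an interruptible wait:

	int ret = ttm_bo_reserve(bo, true /* interruptible */,
				 false /* no_wait */, NULL);
	if (ret)
		return ret;

	/* ... access bo->mem safely under the reservation ... */

	ttm_bo_unreserve(bo);	/* BO goes back onto the LRU here */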

View File

@ -70,6 +70,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* @list: thread private list of ttm_validate_buffer structs.
* @intr: should the wait be interruptible
* @dups: [out] optional list of duplicates.
* @del_lru: true if BOs should be removed from the LRU.
*
* Tries to reserve bos pointed to by the list entries for validation.
* If the function returns 0, all buffers are marked as "unfenced",
@ -98,7 +99,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
struct list_head *dups);
struct list_head *dups, bool del_lru);
/**
* function ttm_eu_fence_buffer_objects.