Merge tag 'amd-drm-next-6.1-2022-09-23' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-6.1-2022-09-23:

amdgpu:
- SDMA fix
- Add new firmware types to debugfs/IOCTL version queries
- Misc spelling and grammar fixes
- Misc code cleanups
- DCN 3.2.x fixes
- DCN 3.1.x fixes
- CS cleanup
- Gang submit support
- Clang fixes
- Non-DC audio fix
- GPUVM locking fixes
- Vega10 PWM fan speed fix

amdkfd:
- MQD manager cleanup
- Misc spelling and grammar fixes

UAPI:
- Add new firmware types to the FW version query IOCTL

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220923215729.6061-1-alexander.deucher@amd.com
commit e8573000f4
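The centerpiece of this pull is gang submit: one CS IOCTL can now carry IBs for up to four different engines that the kernel schedules to run together. Roughly, userspace adds one AMDGPU_CHUNK_ID_IB chunk per engine to a single submission. The sketch below is not part of this patch; it omits context/BO-list creation, fence handling, and the IB contents themselves, and the helper name, addresses, and sizes are placeholders:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/amdgpu_drm.h>

	/* Submit one GFX IB and one compute IB as a gang (illustrative only). */
	static int submit_gang(int fd, uint32_t ctx_id, uint32_t bo_list,
			       uint64_t gfx_va, uint32_t gfx_bytes,
			       uint64_t comp_va, uint32_t comp_bytes)
	{
		struct drm_amdgpu_cs_chunk_ib ibs[2];
		struct drm_amdgpu_cs_chunk chunks[2];
		uint64_t chunk_ptrs[2];
		union drm_amdgpu_cs cs;

		memset(ibs, 0, sizeof(ibs));
		ibs[0].ip_type = AMDGPU_HW_IP_GFX;	/* first gang member */
		ibs[0].va_start = gfx_va;
		ibs[0].ib_bytes = gfx_bytes;
		ibs[1].ip_type = AMDGPU_HW_IP_COMPUTE;	/* second gang member */
		ibs[1].va_start = comp_va;
		ibs[1].ib_bytes = comp_bytes;

		for (int i = 0; i < 2; i++) {
			chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
			chunks[i].length_dw = sizeof(ibs[i]) / 4;
			chunks[i].chunk_data = (uintptr_t)&ibs[i];
			chunk_ptrs[i] = (uintptr_t)&chunks[i];
		}

		memset(&cs, 0, sizeof(cs));
		cs.in.ctx_id = ctx_id;
		cs.in.bo_list_handle = bo_list;
		cs.in.num_chunks = 2;
		cs.in.chunks = (uintptr_t)chunk_ptrs;

		return ioctl(fd, DRM_IOCTL_AMDGPU_CS, &cs);
	}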
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -885,6 +885,7 @@ struct amdgpu_device {
 	u64				fence_context;
 	unsigned			num_rings;
 	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
+	struct dma_fence __rcu		*gang_submit;
 	bool				ib_pool_ready;
 	struct amdgpu_sa_manager	ib_pools[AMDGPU_IB_POOL_MAX];
 	struct amdgpu_sched		gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
@@ -1294,6 +1295,8 @@ u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
 				u32 reg);
 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
 				u32 reg, u32 v);
+struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
+					    struct dma_fence *gang);

 /* atpx handler */
 #if defined(CONFIG_VGA_SWITCHEROO)
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -686,6 +686,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
 	ib->length_dw = ib_len;
 	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
 	job->vmid = vmid;
+	job->num_ibs = 1;

 	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1674,10 +1674,12 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 						      adev->mode_info.dither_property,
 						      AMDGPU_FMT_DITHER_DISABLE);

-			if (amdgpu_audio != 0)
+			if (amdgpu_audio != 0) {
 				drm_object_attach_property(&amdgpu_connector->base.base,
 							   adev->mode_info.audio_property,
 							   AMDGPU_AUDIO_AUTO);
+				amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
+			}

 			subpixel_order = SubPixelHorizontalRGB;
 			connector->interlace_allowed = true;
@@ -1799,6 +1801,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 				drm_object_attach_property(&amdgpu_connector->base.base,
 							   adev->mode_info.audio_property,
 							   AMDGPU_AUDIO_AUTO);
+				amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
 			}
 			drm_object_attach_property(&amdgpu_connector->base.base,
 						   adev->mode_info.dither_property,
@@ -1852,6 +1855,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 				drm_object_attach_property(&amdgpu_connector->base.base,
 							   adev->mode_info.audio_property,
 							   AMDGPU_AUDIO_AUTO);
+				amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
 			}
 			drm_object_attach_property(&amdgpu_connector->base.base,
 						   adev->mode_info.dither_property,
@@ -1902,6 +1906,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 				drm_object_attach_property(&amdgpu_connector->base.base,
 							   adev->mode_info.audio_property,
 							   AMDGPU_AUDIO_AUTO);
+				amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
 			}
 			drm_object_attach_property(&amdgpu_connector->base.base,
 						   adev->mode_info.dither_property,
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -64,11 +64,51 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
 	return 0;
 }

+static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
+			     struct drm_amdgpu_cs_chunk_ib *chunk_ib)
+{
+	struct drm_sched_entity *entity;
+	unsigned int i;
+	int r;
+
+	r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
+				  chunk_ib->ip_instance,
+				  chunk_ib->ring, &entity);
+	if (r)
+		return r;
+
+	/*
+	 * Abort if there is no run queue associated with this entity.
+	 * Possibly because of disabled HW IP.
+	 */
+	if (entity->rq == NULL)
+		return -EINVAL;
+
+	/* Check if we can add this IB to some existing job */
+	for (i = 0; i < p->gang_size; ++i)
+		if (p->entities[i] == entity)
+			return i;
+
+	/* If not increase the gang size if possible */
+	if (i == AMDGPU_CS_GANG_SIZE)
+		return -EINVAL;
+
+	p->entities[i] = entity;
+	p->gang_size = i + 1;
+	return i;
+}
+
 static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
 			   struct drm_amdgpu_cs_chunk_ib *chunk_ib,
 			   unsigned int *num_ibs)
 {
-	++(*num_ibs);
+	int r;
+
+	r = amdgpu_cs_job_idx(p, chunk_ib);
+	if (r < 0)
+		return r;
+
+	++(num_ibs[r]);
 	return 0;
 }
@@ -142,11 +182,12 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 			   union drm_amdgpu_cs *cs)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+	unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
 	struct amdgpu_vm *vm = &fpriv->vm;
 	uint64_t *chunk_array_user;
 	uint64_t *chunk_array;
-	unsigned size, num_ibs = 0;
 	uint32_t uf_offset = 0;
+	unsigned int size;
 	int ret;
 	int i;
@@ -209,7 +250,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 			if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
 				goto free_partial_kdata;

-			ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, &num_ibs);
+			ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
 			if (ret)
 				goto free_partial_kdata;
 			break;
@@ -246,17 +287,28 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 		}
 	}

-	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
-	if (ret)
-		goto free_all_kdata;
+	if (!p->gang_size)
+		return -EINVAL;

-	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
+	for (i = 0; i < p->gang_size; ++i) {
+		ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
+		if (ret)
+			goto free_all_kdata;
+
+		ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i],
+					 &fpriv->vm);
+		if (ret)
+			goto free_all_kdata;
+	}
+	p->gang_leader = p->jobs[p->gang_size - 1];
+
+	if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
 		ret = -ECANCELED;
 		goto free_all_kdata;
 	}

 	if (p->uf_entry.tv.bo)
-		p->job->uf_addr = uf_offset;
+		p->gang_leader->uf_addr = uf_offset;
 	kvfree(chunk_array);

 	/* Use this opportunity to fill in task info for the vm */
@@ -278,93 +330,69 @@ free_chunk:
 	return ret;
 }

-static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
-			     struct amdgpu_cs_parser *parser)
+static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
+			   struct amdgpu_cs_chunk *chunk,
+			   unsigned int *ce_preempt,
+			   unsigned int *de_preempt)
 {
-	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+	struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
+	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
-	int r, ce_preempt = 0, de_preempt = 0;
 	struct amdgpu_ring *ring;
-	int i, j;
+	struct amdgpu_job *job;
+	struct amdgpu_ib *ib;
+	int r;

-	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
-		struct amdgpu_cs_chunk *chunk;
-		struct amdgpu_ib *ib;
-		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
-		struct drm_sched_entity *entity;
+	r = amdgpu_cs_job_idx(p, chunk_ib);
+	if (r < 0)
+		return r;

-		chunk = &parser->chunks[i];
-		ib = &parser->job->ibs[j];
-		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
-
-		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
-			continue;
-
-		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
-		    chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
-			if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
-				ce_preempt++;
-			else
-				de_preempt++;
-
-			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
-			if (ce_preempt > 1 || de_preempt > 1)
-				return -EINVAL;
-		}
-
-		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
-					  chunk_ib->ip_instance, chunk_ib->ring,
-					  &entity);
-		if (r)
-			return r;
-
-		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
-			parser->job->preamble_status |=
-				AMDGPU_PREAMBLE_IB_PRESENT;
-
-		if (parser->entity && parser->entity != entity)
-			return -EINVAL;
-
-		/* Return if there is no run queue associated with this entity.
-		 * Possibly because of disabled HW IP*/
-		if (entity->rq == NULL)
-			return -EINVAL;
-
-		parser->entity = entity;
-
-		ring = to_amdgpu_ring(entity->rq->sched);
-		r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
-				  chunk_ib->ib_bytes : 0,
-				  AMDGPU_IB_POOL_DELAYED, ib);
-		if (r) {
-			DRM_ERROR("Failed to get ib !\n");
-			return r;
-		}
-
-		ib->gpu_addr = chunk_ib->va_start;
-		ib->length_dw = chunk_ib->ib_bytes / 4;
-		ib->flags = chunk_ib->flags;
-
-		j++;
-	}
+	job = p->jobs[r];
+	ring = amdgpu_job_ring(job);
+	ib = &job->ibs[job->num_ibs++];

 	/* MM engine doesn't support user fences */
-	ring = to_amdgpu_ring(parser->entity->rq->sched);
-	if (parser->job->uf_addr && ring->funcs->no_user_fence)
+	if (p->uf_entry.tv.bo && ring->funcs->no_user_fence)
 		return -EINVAL;

+	if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
+	    chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
+		if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
+			(*ce_preempt)++;
+		else
+			(*de_preempt)++;
+
+		/* Each GFX command submit allows only 1 IB max
+		 * preemptible for CE & DE */
+		if (*ce_preempt > 1 || *de_preempt > 1)
+			return -EINVAL;
+	}
+
+	if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
+		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
+
+	r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
+			  chunk_ib->ib_bytes : 0,
+			  AMDGPU_IB_POOL_DELAYED, ib);
+	if (r) {
+		DRM_ERROR("Failed to get ib !\n");
+		return r;
+	}
+
+	ib->gpu_addr = chunk_ib->va_start;
+	ib->length_dw = chunk_ib->ib_bytes / 4;
+	ib->flags = chunk_ib->flags;
 	return 0;
 }

-static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
-				       struct amdgpu_cs_chunk *chunk)
+static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
+				     struct amdgpu_cs_chunk *chunk)
 {
+	struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	unsigned num_deps;
 	int i, r;
-	struct drm_amdgpu_cs_chunk_dep *deps;

-	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
 	num_deps = chunk->length_dw * 4 /
 		sizeof(struct drm_amdgpu_cs_chunk_dep);
@@ -402,7 +430,7 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
 			dma_fence_put(old);
 		}

-		r = amdgpu_sync_fence(&p->job->sync, fence);
+		r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
 		dma_fence_put(fence);
 		if (r)
 			return r;
@@ -410,9 +438,9 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
 	return 0;
 }

-static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
-						 uint32_t handle, u64 point,
-						 u64 flags)
+static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
+					 uint32_t handle, u64 point,
+					 u64 flags)
 {
 	struct dma_fence *fence;
 	int r;
@@ -424,25 +452,23 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
 		return r;
 	}

-	r = amdgpu_sync_fence(&p->job->sync, fence);
+	r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
 	dma_fence_put(fence);

 	return r;
 }

-static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
-					    struct amdgpu_cs_chunk *chunk)
+static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
+				   struct amdgpu_cs_chunk *chunk)
 {
-	struct drm_amdgpu_cs_chunk_sem *deps;
+	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
 	unsigned num_deps;
 	int i, r;

-	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
 	num_deps = chunk->length_dw * 4 /
 		sizeof(struct drm_amdgpu_cs_chunk_sem);
 	for (i = 0; i < num_deps; ++i) {
-		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
-							  0, 0);
+		r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
 		if (r)
 			return r;
 	}
@@ -450,21 +476,19 @@ static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
 	return 0;
 }

-static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
-						     struct amdgpu_cs_chunk *chunk)
+static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
+					      struct amdgpu_cs_chunk *chunk)
 {
-	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
+	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
 	unsigned num_deps;
 	int i, r;

-	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
 	num_deps = chunk->length_dw * 4 /
 		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
 	for (i = 0; i < num_deps; ++i) {
-		r = amdgpu_syncobj_lookup_and_add_to_sync(p,
-							  syncobj_deps[i].handle,
-							  syncobj_deps[i].point,
-							  syncobj_deps[i].flags);
+		r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
+						  syncobj_deps[i].point,
+						  syncobj_deps[i].flags);
 		if (r)
 			return r;
 	}
@@ -472,14 +496,13 @@ static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
 	return 0;
 }

-static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
-					     struct amdgpu_cs_chunk *chunk)
+static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
+				    struct amdgpu_cs_chunk *chunk)
 {
-	struct drm_amdgpu_cs_chunk_sem *deps;
+	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
 	unsigned num_deps;
 	int i;

-	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
 	num_deps = chunk->length_dw * 4 /
 		sizeof(struct drm_amdgpu_cs_chunk_sem);
@@ -507,15 +530,13 @@ static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
 	return 0;
 }

-
-static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
-						      struct amdgpu_cs_chunk *chunk)
+static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
+						struct amdgpu_cs_chunk *chunk)
 {
-	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
+	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
 	unsigned num_deps;
 	int i;

-	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
 	num_deps = chunk->length_dw * 4 /
 		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
@@ -552,9 +573,9 @@ static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
 	return 0;
 }

-static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
-				  struct amdgpu_cs_parser *p)
+static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
 {
+	unsigned int ce_preempt = 0, de_preempt = 0;
 	int i, r;

 	for (i = 0; i < p->nchunks; ++i) {
@@ -563,29 +584,34 @@ static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
 		chunk = &p->chunks[i];

 		switch (chunk->chunk_id) {
+		case AMDGPU_CHUNK_ID_IB:
+			r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
+			if (r)
+				return r;
+			break;
 		case AMDGPU_CHUNK_ID_DEPENDENCIES:
 		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
-			r = amdgpu_cs_process_fence_dep(p, chunk);
+			r = amdgpu_cs_p2_dependencies(p, chunk);
 			if (r)
 				return r;
 			break;
 		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
-			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
+			r = amdgpu_cs_p2_syncobj_in(p, chunk);
 			if (r)
 				return r;
 			break;
 		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
-			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
+			r = amdgpu_cs_p2_syncobj_out(p, chunk);
 			if (r)
 				return r;
 			break;
 		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
-			r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
+			r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
 			if (r)
 				return r;
 			break;
 		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
-			r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
+			r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
 			if (r)
 				return r;
 			break;
@@ -830,6 +856,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_list_entry *e;
 	struct list_head duplicates;
+	unsigned int i;
 	int r;

 	INIT_LIST_HEAD(&p->validated);
@@ -913,16 +940,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		e->bo_va = amdgpu_vm_bo_find(vm, bo);
 	}

-	/* Move fence waiting after getting reservation lock of
-	 * PD root. Then there is no need on a ctx mutex lock.
-	 */
-	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entity);
-	if (unlikely(r != 0)) {
-		if (r != -ERESTARTSYS)
-			DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
-		goto error_validate;
-	}
-
 	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
 					  &p->bytes_moved_vis_threshold);
 	p->bytes_moved = 0;
@@ -943,125 +960,139 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	if (r)
 		goto error_validate;

-	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
-				     p->bytes_moved_vis);
-
-	amdgpu_job_set_resources(p->job, p->bo_list->gds_obj,
-				 p->bo_list->gws_obj, p->bo_list->oa_obj);
-
-	if (!r && p->uf_entry.tv.bo) {
+	if (p->uf_entry.tv.bo) {
 		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

 		r = amdgpu_ttm_alloc_gart(&uf->tbo);
-		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
 		if (r)
 			goto error_validate;
+
+		p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf);
 	}

+	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
+				     p->bytes_moved_vis);
+
+	for (i = 0; i < p->gang_size; ++i)
+		amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
+					 p->bo_list->gws_obj,
+					 p->bo_list->oa_obj);
 	return 0;

 error_validate:
-	if (r)
-		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+	ttm_eu_backoff_reservation(&p->ticket, &p->validated);

 out_free_user_pages:
-	if (r) {
-		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
-			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

-			if (!e->user_pages)
-				continue;
-			amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
-			kvfree(e->user_pages);
-			e->user_pages = NULL;
-		}
-		mutex_unlock(&p->bo_list->bo_list_mutex);
+		if (!e->user_pages)
+			continue;
+		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+		kvfree(e->user_pages);
+		e->user_pages = NULL;
 	}
 	return r;
 }

-static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
+static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
 {
-	int i;
+	int i, j;

 	if (!trace_amdgpu_cs_enabled())
 		return;

-	for (i = 0; i < parser->job->num_ibs; i++)
-		trace_amdgpu_cs(parser, i);
+	for (i = 0; i < p->gang_size; ++i) {
+		struct amdgpu_job *job = p->jobs[i];
+
+		for (j = 0; j < job->num_ibs; ++j)
+			trace_amdgpu_cs(p, job, &job->ibs[j]);
+	}
 }

+static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
+			       struct amdgpu_job *job)
+{
+	struct amdgpu_ring *ring = amdgpu_job_ring(job);
+	unsigned int i;
+	int r;
+
+	/* Only for UVD/VCE VM emulation */
+	if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
+		return 0;
+
+	for (i = 0; i < job->num_ibs; ++i) {
+		struct amdgpu_ib *ib = &job->ibs[i];
+		struct amdgpu_bo_va_mapping *m;
+		struct amdgpu_bo *aobj;
+		uint64_t va_start;
+		uint8_t *kptr;
+
+		va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
+		r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
+		if (r) {
+			DRM_ERROR("IB va_start is invalid\n");
+			return r;
+		}
+
+		if ((va_start + ib->length_dw * 4) >
+		    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+			DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+			return -EINVAL;
+		}
+
+		/* the IB should be reserved at this point */
+		r = amdgpu_bo_kmap(aobj, (void **)&kptr);
+		if (r) {
+			return r;
+		}
+
+		kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
+
+		if (ring->funcs->parse_cs) {
+			memcpy(ib->ptr, kptr, ib->length_dw * 4);
+			amdgpu_bo_kunmap(aobj);
+
+			r = amdgpu_ring_parse_cs(ring, p, job, ib);
+			if (r)
+				return r;
+		} else {
+			ib->ptr = (uint32_t *)kptr;
+			r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
+			amdgpu_bo_kunmap(aobj);
+			if (r)
+				return r;
+		}
+	}
+
+	return 0;
+}
+
+static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
+{
+	unsigned int i;
+	int r;
+
+	for (i = 0; i < p->gang_size; ++i) {
+		r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
+		if (r)
+			return r;
+	}
+	return 0;
+}
+
 static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 {
-	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+	struct amdgpu_job *job = p->gang_leader;
 	struct amdgpu_device *adev = p->adev;
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_list_entry *e;
 	struct amdgpu_bo_va *bo_va;
 	struct amdgpu_bo *bo;
+	unsigned int i;
 	int r;

-	/* Only for UVD/VCE VM emulation */
-	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
-		unsigned i, j;
-
-		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
-			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
-			struct amdgpu_bo_va_mapping *m;
-			struct amdgpu_bo *aobj = NULL;
-			struct amdgpu_cs_chunk *chunk;
-			uint64_t offset, va_start;
-			struct amdgpu_ib *ib;
-			uint8_t *kptr;
-
-			chunk = &p->chunks[i];
-			ib = &p->job->ibs[j];
-			chunk_ib = chunk->kdata;
-
-			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
-				continue;
-
-			va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
-			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
-			if (r) {
-				DRM_ERROR("IB va_start is invalid\n");
-				return r;
-			}
-
-			if ((va_start + chunk_ib->ib_bytes) >
-			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
-				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
-				return -EINVAL;
-			}
-
-			/* the IB should be reserved at this point */
-			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
-			if (r) {
-				return r;
-			}
-
-			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
-			kptr += va_start - offset;
-
-			if (ring->funcs->parse_cs) {
-				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
-				amdgpu_bo_kunmap(aobj);
-
-				r = amdgpu_ring_parse_cs(ring, p, p->job, ib);
-				if (r)
-					return r;
-			} else {
-				ib->ptr = (uint32_t *)kptr;
-				r = amdgpu_ring_patch_cs_in_place(ring, p, p->job, ib);
-				amdgpu_bo_kunmap(aobj);
-				if (r)
-					return r;
-			}
-
-			j++;
-		}
-	}
-
-	if (!p->job->vm)
+	if (!job->vm)
 		return 0;

 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 	if (r)
 		return r;
@@ -1070,7 +1101,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;

-	r = amdgpu_sync_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
+	r = amdgpu_sync_fence(&job->sync, fpriv->prt_va->last_pt_update);
 	if (r)
 		return r;
@@ -1081,7 +1112,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 		if (r)
 			return r;

-		r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
+		r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
 		if (r)
 			return r;
 	}
@@ -1100,7 +1131,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 		if (r)
 			return r;

-		r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
+		r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
 		if (r)
 			return r;
 	}
@@ -1113,11 +1144,18 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;

-	r = amdgpu_sync_fence(&p->job->sync, vm->last_update);
+	r = amdgpu_sync_fence(&job->sync, vm->last_update);
 	if (r)
 		return r;

-	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
+	for (i = 0; i < p->gang_size; ++i) {
+		job = p->jobs[i];
+
+		if (!job->vm)
+			continue;
+
+		job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
+	}

 	if (amdgpu_vm_debug) {
 		/* Invalidate all BOs to test for userspace bugs */
@@ -1138,7 +1176,9 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+	struct amdgpu_job *leader = p->gang_leader;
 	struct amdgpu_bo_list_entry *e;
+	unsigned int i;
 	int r;

 	list_for_each_entry(e, &p->validated, tv.head) {
@@ -1148,12 +1188,23 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)

 		sync_mode = amdgpu_bo_explicit_sync(bo) ?
 			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
-		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
+		r = amdgpu_sync_resv(p->adev, &leader->sync, resv, sync_mode,
 				     &fpriv->vm);
 		if (r)
 			return r;
 	}
-	return 0;
+
+	for (i = 0; i < p->gang_size - 1; ++i) {
+		r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync);
+		if (r)
+			return r;
+	}
+
+	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_size - 1]);
+	if (r && r != -ERESTARTSYS)
+		DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+
+	return r;
 }

 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
@@ -1177,20 +1228,28 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 			    union drm_amdgpu_cs *cs)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	struct drm_sched_entity *entity = p->entity;
+	struct amdgpu_job *leader = p->gang_leader;
 	struct amdgpu_bo_list_entry *e;
-	struct amdgpu_job *job;
+	unsigned int i;
 	uint64_t seq;
 	int r;

-	job = p->job;
-	p->job = NULL;
+	for (i = 0; i < p->gang_size; ++i)
+		drm_sched_job_arm(&p->jobs[i]->base);

-	r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
-	if (r)
-		goto error_unlock;
+	for (i = 0; i < (p->gang_size - 1); ++i) {
+		struct dma_fence *fence;

-	drm_sched_job_arm(&job->base);
+		fence = &p->jobs[i]->base.s_fence->scheduled;
+		r = amdgpu_sync_fence(&leader->sync, fence);
+		if (r)
+			goto error_cleanup;
+	}
+
+	if (p->gang_size > 1) {
+		for (i = 0; i < p->gang_size; ++i)
+			amdgpu_job_set_gang_leader(p->jobs[i], leader);
+	}

 	/* No memory allocation is allowed while holding the notifier lock.
 	 * The lock is held until amdgpu_cs_submit is finished and fence is
@@ -1201,6 +1260,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
 	 * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
 	 */
+	r = 0;
 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
@@ -1208,62 +1268,65 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	}
 	if (r) {
 		r = -EAGAIN;
-		goto error_abort;
+		goto error_unlock;
 	}

-	p->fence = dma_fence_get(&job->base.s_fence->finished);
+	p->fence = dma_fence_get(&leader->base.s_fence->finished);
+	list_for_each_entry(e, &p->validated, tv.head) {

-	seq = amdgpu_ctx_add_fence(p->ctx, entity, p->fence);
+		/* Everybody except for the gang leader uses READ */
+		for (i = 0; i < (p->gang_size - 1); ++i) {
+			dma_resv_add_fence(e->tv.bo->base.resv,
+					   &p->jobs[i]->base.s_fence->finished,
+					   DMA_RESV_USAGE_READ);
+		}
+
+		/* The gang leader is remembered as writer */
+		e->tv.num_shared = 0;
+	}
+
+	seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_size - 1],
+				   p->fence);
 	amdgpu_cs_post_dependencies(p);

-	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
+	if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
 	    !p->ctx->preamble_presented) {
-		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+		leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
 		p->ctx->preamble_presented = true;
 	}

 	cs->out.handle = seq;
-	job->uf_sequence = seq;
+	leader->uf_sequence = seq;

-	amdgpu_job_free_resources(job);
-
-	trace_amdgpu_cs_ioctl(job);
 	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
-	drm_sched_entity_push_job(&job->base);
+	for (i = 0; i < p->gang_size; ++i) {
+		amdgpu_job_free_resources(p->jobs[i]);
+		trace_amdgpu_cs_ioctl(p->jobs[i]);
+		drm_sched_entity_push_job(&p->jobs[i]->base);
+		p->jobs[i] = NULL;
+	}

 	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
-
-	/* Make sure all BOs are remembered as writers */
-	amdgpu_bo_list_for_each_entry(e, p->bo_list)
-		e->tv.num_shared = 0;
-
 	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
-
 	mutex_unlock(&p->adev->notifier_lock);
 	mutex_unlock(&p->bo_list->bo_list_mutex);
-
 	return 0;

-error_abort:
-	drm_sched_job_cleanup(&job->base);
+error_unlock:
 	mutex_unlock(&p->adev->notifier_lock);

-error_unlock:
-	amdgpu_job_free(job);
+error_cleanup:
+	for (i = 0; i < p->gang_size; ++i)
+		drm_sched_job_cleanup(&p->jobs[i]->base);
 	return r;
 }

 /* Cleanup the parser structure */
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
-				  bool backoff)
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
 {
 	unsigned i;

-	if (error && backoff) {
-		ttm_eu_backoff_reservation(&parser->ticket,
-					   &parser->validated);
-		mutex_unlock(&parser->bo_list->bo_list_mutex);
-	}
-
 	for (i = 0; i < parser->num_post_deps; i++) {
 		drm_syncobj_put(parser->post_deps[i].syncobj);
 		kfree(parser->post_deps[i].chain);
@@ -1280,8 +1343,10 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
 	for (i = 0; i < parser->nchunks; i++)
 		kvfree(parser->chunks[i].kdata);
 	kvfree(parser->chunks);
-	if (parser->job)
-		amdgpu_job_free(parser->job);
+	for (i = 0; i < parser->gang_size; ++i) {
+		if (parser->jobs[i])
+			amdgpu_job_free(parser->jobs[i]);
+	}
 	if (parser->uf_entry.tv.bo) {
 		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
@@ -1293,7 +1358,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_cs_parser parser;
-	bool reserved_buffers = false;
 	int r;

 	if (amdgpu_ras_intr_triggered())
@@ -1306,22 +1370,16 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (r) {
 		if (printk_ratelimit())
 			DRM_ERROR("Failed to initialize parser %d!\n", r);
-		goto out;
+		return r;
 	}

 	r = amdgpu_cs_pass1(&parser, data);
 	if (r)
-		goto out;
+		goto error_fini;

-	r = amdgpu_cs_ib_fill(adev, &parser);
+	r = amdgpu_cs_pass2(&parser);
 	if (r)
-		goto out;
-
-	r = amdgpu_cs_dependencies(adev, &parser);
-	if (r) {
-		DRM_ERROR("Failed in the dependencies handling %d!\n", r);
-		goto out;
-	}
+		goto error_fini;

 	r = amdgpu_cs_parser_bos(&parser, data);
 	if (r) {
@@ -1329,25 +1387,36 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			DRM_ERROR("Not enough memory for command submission!\n");
 		else if (r != -ERESTARTSYS && r != -EAGAIN)
 			DRM_ERROR("Failed to process the buffer list %d!\n", r);
-		goto out;
+		goto error_fini;
 	}

-	reserved_buffers = true;
-
-	trace_amdgpu_cs_ibs(&parser);
+	r = amdgpu_cs_patch_jobs(&parser);
+	if (r)
+		goto error_backoff;

 	r = amdgpu_cs_vm_handling(&parser);
 	if (r)
-		goto out;
+		goto error_backoff;

 	r = amdgpu_cs_sync_rings(&parser);
 	if (r)
-		goto out;
+		goto error_backoff;
+
+	trace_amdgpu_cs_ibs(&parser);

 	r = amdgpu_cs_submit(&parser, data);
-out:
-	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
+	if (r)
+		goto error_backoff;
+
+	amdgpu_cs_parser_fini(&parser);
+	return 0;
+
+error_backoff:
+	ttm_eu_backoff_reservation(&parser.ticket, &parser.validated);
+	mutex_unlock(&parser.bo_list->bo_list_mutex);

+error_fini:
+	amdgpu_cs_parser_fini(&parser);
 	return r;
 }
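The new pass1 maps each IB chunk onto a gang slot by deduplicating scheduler entities: an IB for an already-seen entity joins that entity's job, a new entity opens a new slot up to AMDGPU_CS_GANG_SIZE, and the last slot allocated ends up as the gang leader. A simplified standalone sketch of that find-or-append logic (illustrative only, not the driver code; the names are ours):

	#define GANG_SIZE 4	/* mirrors AMDGPU_CS_GANG_SIZE */

	/* Return the job slot for an entity, growing the gang when the
	 * entity is new; -1 means a fifth distinct engine was requested. */
	static int gang_slot(const void **entities, unsigned int *gang_size,
			     const void *entity)
	{
		unsigned int i;

		for (i = 0; i < *gang_size; ++i)
			if (entities[i] == entity)
				return i;	/* IB joins an existing job */

		if (i == GANG_SIZE)
			return -1;		/* gang is full */

		entities[i] = entity;		/* open a new slot */
		*gang_size = i + 1;
		return i;
	}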
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
@@ -27,6 +27,8 @@
 #include "amdgpu_bo_list.h"
 #include "amdgpu_ring.h"

+#define AMDGPU_CS_GANG_SIZE	4
+
 struct amdgpu_bo_va_mapping;

 struct amdgpu_cs_chunk {
@@ -50,9 +52,11 @@ struct amdgpu_cs_parser {
 	unsigned		nchunks;
 	struct amdgpu_cs_chunk	*chunks;

-	/* scheduler job object */
-	struct amdgpu_job	*job;
-	struct drm_sched_entity	*entity;
+	/* scheduler job objects */
+	unsigned int		gang_size;
+	struct drm_sched_entity	*entities[AMDGPU_CS_GANG_SIZE];
+	struct amdgpu_job	*jobs[AMDGPU_CS_GANG_SIZE];
+	struct amdgpu_job	*gang_leader;

 	/* buffer objects */
 	struct ww_acquire_ctx	ticket;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3511,6 +3511,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->gmc.gart_size = 512 * 1024 * 1024;
 	adev->accel_working = false;
 	adev->num_rings = 0;
+	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
 	adev->mman.buffer_funcs = NULL;
 	adev->mman.buffer_funcs_ring = NULL;
 	adev->vm_manager.vm_pte_funcs = NULL;
@@ -3992,6 +3993,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 	release_firmware(adev->firmware.gpu_info_fw);
 	adev->firmware.gpu_info_fw = NULL;
 	adev->accel_working = false;
+	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));

 	amdgpu_reset_fini(adev);

@@ -4749,6 +4751,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 	struct amdgpu_device *tmp_adev = NULL;
 	bool need_full_reset, skip_hw_reset, vram_lost = false;
 	int r = 0;
+	bool gpu_reset_for_dev_remove = 0;

 	/* Try reset handler method first */
 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
@@ -4768,6 +4771,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
 	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);

+	gpu_reset_for_dev_remove =
+		test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
+			test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+
 	/*
 	 * ASIC reset has to be done on all XGMI hive nodes ASAP
 	 * to allow proper links negotiation in FW (within 1 sec)
@@ -4812,6 +4819,18 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 				amdgpu_ras_intr_cleared();
 		}

+		/* Since the mode1 reset affects base ip blocks, the
+		 * phase1 ip blocks need to be resumed. Otherwise there
+		 * will be a BIOS signature error and the psp bootloader
+		 * can't load kdb on the next amdgpu install.
+		 */
+		if (gpu_reset_for_dev_remove) {
+			list_for_each_entry(tmp_adev, device_list_handle, reset_list)
+				amdgpu_device_ip_resume_phase1(tmp_adev);
+
+			goto end;
+		}
+
 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
 			if (need_full_reset) {
 				/* post card */
@@ -5134,6 +5153,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	bool need_emergency_restart = false;
 	bool audio_suspended = false;
 	int tmp_vram_lost_counter;
+	bool gpu_reset_for_dev_remove = false;
+
+	gpu_reset_for_dev_remove =
+		test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
+			test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);

 	/*
 	 * Special case: RAS triggered and full reset isn't supported
@@ -5169,8 +5193,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	 */
 	INIT_LIST_HEAD(&device_list);
 	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
-		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
 			list_add_tail(&tmp_adev->reset_list, &device_list);
+			if (gpu_reset_for_dev_remove && adev->shutdown)
+				tmp_adev->shutdown = true;
+		}
 		if (!list_is_first(&adev->reset_list, &device_list))
 			list_rotate_to_front(&adev->reset_list, &device_list);
 		device_list_handle = &device_list;
@@ -5253,6 +5280,10 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,

 retry:	/* Rest of adevs pre asic reset from XGMI hive. */
 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+		if (gpu_reset_for_dev_remove) {
+			/* Workaroud for ASICs need to disable SMC first */
+			amdgpu_device_smu_fini_early(tmp_adev);
+		}
 		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
 		/*TODO Should we stop ?*/
 		if (r) {
@@ -5286,6 +5317,9 @@ retry:	/* Rest of adevs pre asic reset from XGMI hive. */
 			adev->asic_reset_res = 0;
 			goto retry;
 		}
+
+		if (!r && gpu_reset_for_dev_remove)
+			goto recover_end;
 	}

 skip_hw_reset:
@@ -5359,6 +5393,7 @@ skip_sched_resume:
 		amdgpu_device_unset_mp1_state(tmp_adev);
 	}

+recover_end:
 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
 				    reset_list);
 	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
@@ -5927,3 +5962,36 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
 	(void)RREG32(data);
 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 }
+
+/**
+ * amdgpu_device_switch_gang - switch to a new gang
+ * @adev: amdgpu_device pointer
+ * @gang: the gang to switch to
+ *
+ * Try to switch to a new gang.
+ * Returns: NULL if we switched to the new gang or a reference to the current
+ * gang leader.
+ */
+struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
+					    struct dma_fence *gang)
+{
+	struct dma_fence *old = NULL;
+
+	do {
+		dma_fence_put(old);
+		rcu_read_lock();
+		old = dma_fence_get_rcu_safe(&adev->gang_submit);
+		rcu_read_unlock();
+
+		if (old == gang)
+			break;
+
+		if (!dma_fence_is_signaled(old))
+			return old;
+
+	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
+			 old, gang) != old);
+
+	dma_fence_put(old);
+	return NULL;
+}
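amdgpu_device_switch_gang() publishes the new gang's scheduled fence with a lock-free retry loop: if the currently stored gang fence has not signaled yet, it is handed back to the scheduler as a dependency instead of being replaced. The same idea in plain C11 atomics — a simplified analogue only; it elides the RCU-safe reference taking the kernel version relies on, and the fence helpers below are assumed stubs:

	#include <stdatomic.h>
	#include <stdbool.h>

	struct fence;				/* opaque, refcounted (assumed) */
	struct fence *fence_get(struct fence *f);	/* assumed helpers */
	void fence_put(struct fence *f);
	bool fence_is_signaled(struct fence *f);

	static _Atomic(struct fence *) gang_submit;

	/* Returns NULL when the new gang was published (or already current);
	 * otherwise returns the still-running previous gang to wait on. */
	static struct fence *switch_gang(struct fence *gang)
	{
		struct fence *old;

		for (;;) {
			old = atomic_load(&gang_submit);
			if (old == gang)
				return NULL;	/* nothing to do */

			if (!fence_is_signaled(old))
				return fence_get(old);	/* caller must wait */

			/* previous gang finished; try to install the new one */
			if (atomic_compare_exchange_strong(&gang_submit,
							   &old, gang))
				return NULL;
			/* lost a race with a concurrent submission; retry */
		}
	}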
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -39,6 +39,7 @@
 #include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fb_helper.h>
@@ -497,6 +498,11 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
 static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
 	.destroy = drm_gem_fb_destroy,
 	.create_handle = drm_gem_fb_create_handle,
 };

+static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
+	.destroy = drm_gem_fb_destroy,
+	.create_handle = drm_gem_fb_create_handle,
+	.dirty = drm_atomic_helper_dirtyfb,
+};
+
@@ -1102,7 +1108,10 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
 	if (ret)
 		goto err;

-	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+	if (drm_drv_uses_atomic_modeset(dev))
+		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
+	else
+		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
 	if (ret)
 		goto err;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -102,9 +102,10 @@
  * - 3.46.0 - To enable hot plug amdgpu tests in libdrm
  * - 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags
  * - 3.48.0 - Add IP discovery version info to HW INFO
+ * - 3.49.0 - Add gang submit into CS IOCTL
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	48
+#define KMS_DRIVER_MINOR	49
 #define KMS_DRIVER_PATCHLEVEL	0

 int amdgpu_vram_limit;
@@ -2186,6 +2187,37 @@ amdgpu_pci_remove(struct pci_dev *pdev)
 		pm_runtime_forbid(dev->dev);
 	}

+	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) {
+		bool need_to_reset_gpu = false;
+
+		if (adev->gmc.xgmi.num_physical_nodes > 1) {
+			struct amdgpu_hive_info *hive;
+
+			hive = amdgpu_get_xgmi_hive(adev);
+			if (hive->device_remove_count == 0)
+				need_to_reset_gpu = true;
+			hive->device_remove_count++;
+			amdgpu_put_xgmi_hive(hive);
+		} else {
+			need_to_reset_gpu = true;
+		}
+
+		/* Workaround for ASICs need to reset SMU.
+		 * Called only when the first device is removed.
+		 */
+		if (need_to_reset_gpu) {
+			struct amdgpu_reset_context reset_context;
+
+			adev->shutdown = true;
+			memset(&reset_context, 0, sizeof(reset_context));
+			reset_context.method = AMD_RESET_METHOD_NONE;
+			reset_context.reset_req_dev = adev;
+			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+			set_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context.flags);
+			amdgpu_device_gpu_recover(adev, NULL, &reset_context);
+		}
+	}
+
 	amdgpu_driver_unload_kms(dev);

 	drm_dev_unplug(dev);
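Because the DRM minor version is bumped to 49 for gang submit, userspace can gate the feature on the reported driver version. A short sketch using libdrm's version query (the helper name is ours):

	#include <xf86drm.h>

	/* Gang submit is only available from amdgpu DRM version 3.49 on. */
	static int supports_gang_submit(int fd)
	{
		drmVersionPtr ver = drmGetVersion(fd);
		int ok;

		if (!ver)
			return 0;
		ok = ver->version_major > 3 ||
		     (ver->version_major == 3 && ver->version_minor >= 49);
		drmFreeVersion(ver);
		return ok;
	}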
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -304,6 +304,10 @@ struct amdgpu_gfx {
 	uint32_t			rlc_srlg_feature_version;
 	uint32_t			rlc_srls_fw_version;
 	uint32_t			rlc_srls_feature_version;
+	uint32_t			rlcp_ucode_version;
+	uint32_t			rlcp_ucode_feature_version;
+	uint32_t			rlcv_ucode_version;
+	uint32_t			rlcv_ucode_feature_version;
 	uint32_t			mec_feature_version;
 	uint32_t			mec2_feature_version;
 	bool				mec_fw_write_wait;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -105,7 +105,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 	 */
 	(*job)->base.sched = &adev->rings[0]->sched;
 	(*job)->vm = vm;
-	(*job)->num_ibs = num_ibs;

 	amdgpu_sync_create(&(*job)->sync);
 	amdgpu_sync_create(&(*job)->sched_sync);
@@ -125,6 +124,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 	if (r)
 		return r;

+	(*job)->num_ibs = 1;
 	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
 	if (r)
 		kfree(*job);
@@ -173,11 +173,29 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 	dma_fence_put(&job->hw_fence);
 }

+void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
+				struct amdgpu_job *leader)
+{
+	struct dma_fence *fence = &leader->base.s_fence->scheduled;
+
+	WARN_ON(job->gang_submit);
+
+	/*
+	 * Don't add a reference when we are the gang leader to avoid circle
+	 * dependency.
+	 */
+	if (job != leader)
+		dma_fence_get(fence);
+	job->gang_submit = fence;
+}
+
 void amdgpu_job_free(struct amdgpu_job *job)
 {
 	amdgpu_job_free_resources(job);
 	amdgpu_sync_free(&job->sync);
 	amdgpu_sync_free(&job->sched_sync);
+	if (job->gang_submit != &job->base.s_fence->scheduled)
+		dma_fence_put(job->gang_submit);

 	if (!job->hw_fence.ops)
 		kfree(job);
@@ -247,12 +265,16 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 		fence = amdgpu_sync_get_fence(&job->sync);
 	}

+	if (!fence && job->gang_submit)
+		fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
+
 	return fence;
 }

 static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
+	struct amdgpu_device *adev = ring->adev;
 	struct dma_fence *fence = NULL, *finished;
 	struct amdgpu_job *job;
 	int r = 0;
@@ -264,8 +286,10 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)

 	trace_amdgpu_sched_run_job(job);

-	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
-		dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
+	/* Skip job if VRAM is lost and never resubmit gangs */
+	if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter) ||
+	    (job->job_run_counter && job->gang_submit))
+		dma_fence_set_error(finished, -ECANCELED);

 	if (finished->error < 0) {
 		DRM_INFO("Skip scheduling IBs!\n");
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -50,6 +50,7 @@ struct amdgpu_job {
 	struct amdgpu_sync	sync;
 	struct amdgpu_sync	sched_sync;
 	struct dma_fence	hw_fence;
+	struct dma_fence	*gang_submit;
 	uint32_t		preamble_status;
 	uint32_t		preemption_status;
 	bool			vm_needs_flush;
@@ -72,6 +73,11 @@ struct amdgpu_job {
 	struct amdgpu_ib	ibs[];
 };

+static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
+{
+	return to_amdgpu_ring(job->base.entity->rq->sched);
+}
+
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 		     struct amdgpu_job **job, struct amdgpu_vm *vm);
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
@@ -79,6 +85,8 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
 			      struct amdgpu_bo *gws, struct amdgpu_bo *oa);
 void amdgpu_job_free_resources(struct amdgpu_job *job);
+void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
+				struct amdgpu_job *leader);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 		      void *owner, struct dma_fence **f);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -247,6 +247,14 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
 		fw_info->ver = adev->gfx.rlc_srls_fw_version;
 		fw_info->feature = adev->gfx.rlc_srls_feature_version;
 		break;
+	case AMDGPU_INFO_FW_GFX_RLCP:
+		fw_info->ver = adev->gfx.rlcp_ucode_version;
+		fw_info->feature = adev->gfx.rlcp_ucode_feature_version;
+		break;
+	case AMDGPU_INFO_FW_GFX_RLCV:
+		fw_info->ver = adev->gfx.rlcv_ucode_version;
+		fw_info->feature = adev->gfx.rlcv_ucode_feature_version;
+		break;
 	case AMDGPU_INFO_FW_GFX_MEC:
 		if (query_fw->index == 0) {
 			fw_info->ver = adev->gfx.mec_fw_version;
@@ -328,6 +336,14 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
 		fw_info->ver = adev->psp.cap_fw_version;
 		fw_info->feature = adev->psp.cap_feature_version;
 		break;
+	case AMDGPU_INFO_FW_MES_KIQ:
+		fw_info->ver = adev->mes.ucode_fw_version[0];
+		fw_info->feature = 0;
+		break;
+	case AMDGPU_INFO_FW_MES:
+		fw_info->ver = adev->mes.ucode_fw_version[1];
+		fw_info->feature = 0;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -1469,6 +1485,22 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
 	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
 		   fw_info.feature, fw_info.ver);

+	/* RLCP */
+	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCP;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "RLCP feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
+	/* RLCV */
+	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "RLCV feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
 	/* MEC */
 	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
 	query_fw.index = 0;
@@ -1581,6 +1613,22 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
 			   fw_info.feature, fw_info.ver);
 	}

+	/* MES_KIQ */
+	query_fw.fw_type = AMDGPU_INFO_FW_MES_KIQ;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "MES_KIQ feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
+	/* MES */
+	query_fw.fw_type = AMDGPU_INFO_FW_MES;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "MES feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
 	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

 	return 0;
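With the new query IDs wired into the FW version IOCTL, the RLCP/RLCV and MES firmware versions become visible to userspace as well. A small sketch using libdrm's amdgpu_query_firmware_version(); it assumes a libdrm new enough to define AMDGPU_INFO_FW_GFX_RLCP and an already-initialized device handle:

	#include <stdint.h>
	#include <stdio.h>
	#include <amdgpu.h>
	#include <amdgpu_drm.h>

	/* dev: handle obtained from amdgpu_device_initialize() */
	static void print_rlcp_fw(amdgpu_device_handle dev)
	{
		uint32_t ver = 0, feature = 0;

		if (amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_RLCP,
						  0 /* ip_instance */,
						  0 /* index */,
						  &ver, &feature) == 0)
			printf("RLCP firmware 0x%08x, feature %u\n",
			       ver, feature);
	}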
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -511,6 +511,11 @@ static int psp_sw_fini(void *handle)
 	kfree(cmd);
 	cmd = NULL;

+	if (psp->km_ring.ring_mem)
+		amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+				      &psp->km_ring.ring_mem_mc_addr,
+				      (void **)&psp->km_ring.ring_mem);
+
 	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
 			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -31,6 +31,7 @@ enum AMDGPU_RESET_FLAGS {
 	AMDGPU_NEED_FULL_RESET = 0,
 	AMDGPU_SKIP_HW_RESET = 1,
 	AMDGPU_SKIP_MODE2_RESET = 2,
+	AMDGPU_RESET_FOR_DEVICE_REMOVE = 3,
 };

 struct amdgpu_reset_context {
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -140,8 +140,10 @@ TRACE_EVENT(amdgpu_bo_create,
 );

 TRACE_EVENT(amdgpu_cs,
-	    TP_PROTO(struct amdgpu_cs_parser *p, int i),
-	    TP_ARGS(p, i),
+	    TP_PROTO(struct amdgpu_cs_parser *p,
+		     struct amdgpu_job *job,
+		     struct amdgpu_ib *ib),
+	    TP_ARGS(p, job, ib),
	    TP_STRUCT__entry(
			     __field(struct amdgpu_bo_list *, bo_list)
			     __field(u32, ring)
@@ -151,10 +153,10 @@ TRACE_EVENT(amdgpu_cs,

	    TP_fast_assign(
			   __entry->bo_list = p->bo_list;
-			   __entry->ring = to_amdgpu_ring(p->entity->rq->sched)->idx;
-			   __entry->dw = p->job->ibs[i].length_dw;
+			   __entry->ring = to_amdgpu_ring(job->base.sched)->idx;
+			   __entry->dw = ib->length_dw;
			   __entry->fences = amdgpu_fence_count_emitted(
-					to_amdgpu_ring(p->entity->rq->sched));
+					to_amdgpu_ring(job->base.sched));
			   ),
	    TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
		      __entry->bo_list, __entry->ring, __entry->dw,
@ -164,70 +164,138 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
} else if (version_major == 2) {
const struct rlc_firmware_header_v2_0 *rlc_hdr =
container_of(hdr, struct rlc_firmware_header_v2_0, header);
const struct rlc_firmware_header_v2_1 *rlc_hdr_v2_1 =
container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
const struct rlc_firmware_header_v2_2 *rlc_hdr_v2_2 =
container_of(rlc_hdr_v2_1, struct rlc_firmware_header_v2_2, v2_1);
const struct rlc_firmware_header_v2_3 *rlc_hdr_v2_3 =
container_of(rlc_hdr_v2_2, struct rlc_firmware_header_v2_3, v2_2);
const struct rlc_firmware_header_v2_4 *rlc_hdr_v2_4 =
container_of(rlc_hdr_v2_3, struct rlc_firmware_header_v2_4, v2_3);

DRM_DEBUG("ucode_feature_version: %u\n",
le32_to_cpu(rlc_hdr->ucode_feature_version));
DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
DRM_DEBUG("save_and_restore_offset: %u\n",
le32_to_cpu(rlc_hdr->save_and_restore_offset));
DRM_DEBUG("clear_state_descriptor_offset: %u\n",
le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
DRM_DEBUG("avail_scratch_ram_locations: %u\n",
le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
DRM_DEBUG("reg_restore_list_size: %u\n",
le32_to_cpu(rlc_hdr->reg_restore_list_size));
DRM_DEBUG("reg_list_format_start: %u\n",
le32_to_cpu(rlc_hdr->reg_list_format_start));
DRM_DEBUG("reg_list_format_separate_start: %u\n",
le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
DRM_DEBUG("starting_offsets_start: %u\n",
le32_to_cpu(rlc_hdr->starting_offsets_start));
DRM_DEBUG("reg_list_format_size_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
DRM_DEBUG("reg_list_size_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_size_bytes));
DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
if (version_minor == 1) {
const struct rlc_firmware_header_v2_1 *v2_1 =
container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
switch (version_minor) {
case 0:
/* rlc_hdr v2_0 */
DRM_DEBUG("ucode_feature_version: %u\n",
le32_to_cpu(rlc_hdr->ucode_feature_version));
DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
DRM_DEBUG("save_and_restore_offset: %u\n",
le32_to_cpu(rlc_hdr->save_and_restore_offset));
DRM_DEBUG("clear_state_descriptor_offset: %u\n",
le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
DRM_DEBUG("avail_scratch_ram_locations: %u\n",
le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
DRM_DEBUG("reg_restore_list_size: %u\n",
le32_to_cpu(rlc_hdr->reg_restore_list_size));
DRM_DEBUG("reg_list_format_start: %u\n",
le32_to_cpu(rlc_hdr->reg_list_format_start));
DRM_DEBUG("reg_list_format_separate_start: %u\n",
le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
DRM_DEBUG("starting_offsets_start: %u\n",
le32_to_cpu(rlc_hdr->starting_offsets_start));
DRM_DEBUG("reg_list_format_size_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
DRM_DEBUG("reg_list_size_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_size_bytes));
DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
break;
case 1:
/* rlc_hdr v2_1 */
DRM_DEBUG("reg_list_format_direct_reg_list_length: %u\n",
le32_to_cpu(v2_1->reg_list_format_direct_reg_list_length));
le32_to_cpu(rlc_hdr_v2_1->reg_list_format_direct_reg_list_length));
DRM_DEBUG("save_restore_list_cntl_ucode_ver: %u\n",
le32_to_cpu(v2_1->save_restore_list_cntl_ucode_ver));
le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_ucode_ver));
DRM_DEBUG("save_restore_list_cntl_feature_ver: %u\n",
le32_to_cpu(v2_1->save_restore_list_cntl_feature_ver));
le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_feature_ver));
DRM_DEBUG("save_restore_list_cntl_size_bytes %u\n",
le32_to_cpu(v2_1->save_restore_list_cntl_size_bytes));
le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_size_bytes));
DRM_DEBUG("save_restore_list_cntl_offset_bytes: %u\n",
le32_to_cpu(v2_1->save_restore_list_cntl_offset_bytes));
le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_offset_bytes));
DRM_DEBUG("save_restore_list_gpm_ucode_ver: %u\n",
le32_to_cpu(v2_1->save_restore_list_gpm_ucode_ver));
le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_ucode_ver));
DRM_DEBUG("save_restore_list_gpm_feature_ver: %u\n",
le32_to_cpu(v2_1->save_restore_list_gpm_feature_ver));
le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_feature_ver));
DRM_DEBUG("save_restore_list_gpm_size_bytes %u\n",
le32_to_cpu(v2_1->save_restore_list_gpm_size_bytes));
le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_size_bytes));
DRM_DEBUG("save_restore_list_gpm_offset_bytes: %u\n",
le32_to_cpu(v2_1->save_restore_list_gpm_offset_bytes));
le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_offset_bytes));
DRM_DEBUG("save_restore_list_srm_ucode_ver: %u\n",
le32_to_cpu(v2_1->save_restore_list_srm_ucode_ver));
le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_ucode_ver));
DRM_DEBUG("save_restore_list_srm_feature_ver: %u\n",
le32_to_cpu(v2_1->save_restore_list_srm_feature_ver));
le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_feature_ver));
DRM_DEBUG("save_restore_list_srm_size_bytes %u\n",
le32_to_cpu(v2_1->save_restore_list_srm_size_bytes));
le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_size_bytes));
DRM_DEBUG("save_restore_list_srm_offset_bytes: %u\n",
le32_to_cpu(v2_1->save_restore_list_srm_offset_bytes));
le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_offset_bytes));
break;
case 2:
/* rlc_hdr v2_2 */
DRM_DEBUG("rlc_iram_ucode_size_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_2->rlc_iram_ucode_size_bytes));
DRM_DEBUG("rlc_iram_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_2->rlc_iram_ucode_offset_bytes));
DRM_DEBUG("rlc_dram_ucode_size_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_2->rlc_dram_ucode_size_bytes));
DRM_DEBUG("rlc_dram_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_2->rlc_dram_ucode_offset_bytes));
break;
case 3:
/* rlc_hdr v2_3 */
DRM_DEBUG("rlcp_ucode_version: %u\n",
le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_version));
DRM_DEBUG("rlcp_ucode_feature_version: %u\n",
le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_feature_version));
DRM_DEBUG("rlcp_ucode_size_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_size_bytes));
DRM_DEBUG("rlcp_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_offset_bytes));
DRM_DEBUG("rlcv_ucode_version: %u\n",
le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_version));
DRM_DEBUG("rlcv_ucode_feature_version: %u\n",
le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_feature_version));
DRM_DEBUG("rlcv_ucode_size_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_size_bytes));
DRM_DEBUG("rlcv_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_offset_bytes));
break;
case 4:
/* rlc_hdr v2_4 */
DRM_DEBUG("global_tap_delays_ucode_size_bytes :%u\n",
le32_to_cpu(rlc_hdr_v2_4->global_tap_delays_ucode_size_bytes));
DRM_DEBUG("global_tap_delays_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_4->global_tap_delays_ucode_offset_bytes));
DRM_DEBUG("se0_tap_delays_ucode_size_bytes :%u\n",
le32_to_cpu(rlc_hdr_v2_4->se0_tap_delays_ucode_size_bytes));
DRM_DEBUG("se0_tap_delays_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_4->se0_tap_delays_ucode_offset_bytes));
DRM_DEBUG("se1_tap_delays_ucode_size_bytes :%u\n",
le32_to_cpu(rlc_hdr_v2_4->se1_tap_delays_ucode_size_bytes));
DRM_DEBUG("se1_tap_delays_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_4->se1_tap_delays_ucode_offset_bytes));
DRM_DEBUG("se2_tap_delays_ucode_size_bytes :%u\n",
le32_to_cpu(rlc_hdr_v2_4->se2_tap_delays_ucode_size_bytes));
DRM_DEBUG("se2_tap_delays_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_4->se2_tap_delays_ucode_offset_bytes));
DRM_DEBUG("se3_tap_delays_ucode_size_bytes :%u\n",
le32_to_cpu(rlc_hdr_v2_4->se3_tap_delays_ucode_size_bytes));
DRM_DEBUG("se3_tap_delays_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_4->se3_tap_delays_ucode_offset_bytes));
break;
default:
DRM_ERROR("Unknown RLC v2 ucode: v2.%u\n", version_minor);
break;
}
} else {
DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);

@ -261,8 +261,12 @@ struct rlc_firmware_header_v2_2 {
/* version_major=2, version_minor=3 */
struct rlc_firmware_header_v2_3 {
struct rlc_firmware_header_v2_2 v2_2;
uint32_t rlcp_ucode_version;
uint32_t rlcp_ucode_feature_version;
uint32_t rlcp_ucode_size_bytes;
uint32_t rlcp_ucode_offset_bytes;
uint32_t rlcv_ucode_version;
uint32_t rlcv_ucode_feature_version;
uint32_t rlcv_ucode_size_bytes;
uint32_t rlcv_ucode_offset_bytes;
};

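Each extended RLC header embeds the previous version as its first member, so the print code above can step outward from the v2_0 view to v2_1..v2_4 with container_of, and only dereferences the wider view once the switch on version_minor confirms it. A minimal standalone sketch of that layout trick (names here are illustrative, not the driver's):

#include <stddef.h>
#include <stdio.h>

/* offsetof-based container_of, as used in the kernel */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hdr_v1 { unsigned int ucode_size; };
struct hdr_v2 { struct hdr_v1 v1; unsigned int extra_size; };

int main(void)
{
	struct hdr_v2 h2 = { .v1 = { .ucode_size = 4 }, .extra_size = 7 };
	struct hdr_v1 *base = &h2.v1;
	/* recover the enclosing v2 header from the embedded v1 view */
	struct hdr_v2 *full = container_of(base, struct hdr_v2, v1);

	printf("%u %u\n", full->v1.ucode_size, full->extra_size);
	return 0;
}

The outward container_of is only valid when the allocation really is the larger struct, which is why every dereference above stays behind the version check.
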
@ -183,10 +183,12 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
struct amdgpu_bo *bo = vm_bo->bo;

vm_bo->moved = true;
spin_lock(&vm_bo->vm->status_lock);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm->evicted);
else
list_move_tail(&vm_bo->vm_status, &vm->evicted);
spin_unlock(&vm_bo->vm->status_lock);
}
/**
* amdgpu_vm_bo_moved - vm_bo is moved
@ -198,7 +200,9 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
spin_unlock(&vm_bo->vm->status_lock);
}

/**
@ -211,7 +215,9 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
spin_unlock(&vm_bo->vm->status_lock);
vm_bo->moved = false;
}

@ -225,9 +231,9 @@ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
spin_lock(&vm_bo->vm->invalidated_lock);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
spin_unlock(&vm_bo->vm->invalidated_lock);
spin_unlock(&vm_bo->vm->status_lock);
}

/**
@ -240,10 +246,13 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
if (vm_bo->bo->parent)
if (vm_bo->bo->parent) {
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
else
spin_unlock(&vm_bo->vm->status_lock);
} else {
amdgpu_vm_bo_idle(vm_bo);
}
}

/**
@ -256,9 +265,9 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
spin_lock(&vm_bo->vm->invalidated_lock);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->done);
spin_unlock(&vm_bo->vm->invalidated_lock);
spin_unlock(&vm_bo->vm->status_lock);
}

/**
@ -363,12 +372,20 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*validate)(void *p, struct amdgpu_bo *bo),
void *param)
{
struct amdgpu_vm_bo_base *bo_base, *tmp;
struct amdgpu_vm_bo_base *bo_base;
struct amdgpu_bo *shadow;
struct amdgpu_bo *bo;
int r;

list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
spin_lock(&vm->status_lock);
while (!list_empty(&vm->evicted)) {
bo_base = list_first_entry(&vm->evicted,
struct amdgpu_vm_bo_base,
vm_status);
spin_unlock(&vm->status_lock);

bo = bo_base->bo;
shadow = amdgpu_bo_shadowed(bo);

r = validate(param, bo);
if (r)
@ -385,7 +402,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
amdgpu_vm_bo_relocated(bo_base);
}
spin_lock(&vm->status_lock);
}
spin_unlock(&vm->status_lock);

amdgpu_vm_eviction_lock(vm);
vm->evicting = false;
@ -406,13 +425,18 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
bool empty;
bool ret;

amdgpu_vm_eviction_lock(vm);
ret = !vm->evicting;
amdgpu_vm_eviction_unlock(vm);

return ret && list_empty(&vm->evicted);
spin_lock(&vm->status_lock);
empty = list_empty(&vm->evicted);
spin_unlock(&vm->status_lock);

return ret && empty;
}

/**
@ -680,9 +704,14 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
struct amdgpu_vm_update_params params;
struct amdgpu_vm_bo_base *entry;
bool flush_tlb_needed = false;
LIST_HEAD(relocated);
int r, idx;

if (list_empty(&vm->relocated))
spin_lock(&vm->status_lock);
list_splice_init(&vm->relocated, &relocated);
spin_unlock(&vm->status_lock);

if (list_empty(&relocated))
return 0;

if (!drm_dev_enter(adev_to_drm(adev), &idx))
@ -697,7 +726,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (r)
goto error;

list_for_each_entry(entry, &vm->relocated, vm_status) {
list_for_each_entry(entry, &relocated, vm_status) {
/* vm_flush_needed after updating moved PDEs */
flush_tlb_needed |= entry->moved;

@ -713,9 +742,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (flush_tlb_needed)
atomic64_inc(&vm->tlb_seq);

while (!list_empty(&vm->relocated)) {
entry = list_first_entry(&vm->relocated,
struct amdgpu_vm_bo_base,
while (!list_empty(&relocated)) {
entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
vm_status);
amdgpu_vm_bo_idle(entry);
}
@ -912,6 +940,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
{
struct amdgpu_bo_va *bo_va, *tmp;

spin_lock(&vm->status_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
continue;
@ -936,7 +965,6 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
gtt_mem, cpu_mem);
}
spin_lock(&vm->invalidated_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
@ -949,7 +977,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
gtt_mem, cpu_mem);
}
spin_unlock(&vm->invalidated_lock);
spin_unlock(&vm->status_lock);
}
/**
* amdgpu_vm_bo_update - update all BO mappings in the vm page table
@ -1278,24 +1306,29 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
struct amdgpu_bo_va *bo_va, *tmp;
struct amdgpu_bo_va *bo_va;
struct dma_resv *resv;
bool clear;
int r;

list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
spin_lock(&vm->status_lock);
while (!list_empty(&vm->moved)) {
bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
base.vm_status);
spin_unlock(&vm->status_lock);

/* Per VM BOs never need to be cleared in the page tables */
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
spin_lock(&vm->status_lock);
}

spin_lock(&vm->invalidated_lock);
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
base.vm_status);
resv = bo_va->base.bo->tbo.base.resv;
spin_unlock(&vm->invalidated_lock);
spin_unlock(&vm->status_lock);

/* Try to reserve the BO to avoid clearing its ptes */
if (!amdgpu_vm_debug && dma_resv_trylock(resv))
@ -1310,9 +1343,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,

if (!clear)
dma_resv_unlock(resv);
spin_lock(&vm->invalidated_lock);
spin_lock(&vm->status_lock);
}
spin_unlock(&vm->invalidated_lock);
spin_unlock(&vm->status_lock);

return 0;
}
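The loops above replace list_for_each_entry_safe with a pop-one-then-drop-the-lock pattern: status_lock only guards list membership, while the potentially sleeping BO update runs unlocked. A minimal sketch of the pattern, assuming process() moves the item off the list so the loop terminates (hypothetical types, not the driver's):

#include <linux/list.h>
#include <linux/spinlock.h>

struct item { struct list_head node; };

/* Illustrative only: drain @head while letting process() sleep. */
static void drain(spinlock_t *lock, struct list_head *head,
		  void (*process)(struct item *))
{
	struct item *it;

	spin_lock(lock);
	while (!list_empty(head)) {
		it = list_first_entry(head, struct item, node);
		/* drop the lock: process() may sleep or relist the item */
		spin_unlock(lock);
		process(it);
		spin_lock(lock);
	}
	spin_unlock(lock);
}
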
@ -1387,7 +1420,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,

if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
!bo_va->base.moved) {
list_move(&bo_va->base.vm_status, &vm->moved);
amdgpu_vm_bo_moved(&bo_va->base);
}
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
@ -1763,9 +1796,9 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
}
}

spin_lock(&vm->invalidated_lock);
spin_lock(&vm->status_lock);
list_del(&bo_va->base.vm_status);
spin_unlock(&vm->invalidated_lock);
spin_unlock(&vm->status_lock);

list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
@ -2019,9 +2052,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->idle);
INIT_LIST_HEAD(&vm->invalidated);
spin_lock_init(&vm->invalidated_lock);
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->freed);
INIT_LIST_HEAD(&vm->done);
INIT_LIST_HEAD(&vm->pt_freed);
INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);

/* create scheduler entities for page table updates */
r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
@ -2223,6 +2258,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)

amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);

flush_work(&vm->pt_free_work);

root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
amdgpu_vm_set_pasid(adev, vm, 0);
@ -2484,8 +2521,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
/* Intentionally setting invalid PTE flag
* combination to force a no-retry-fault
*/
flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
AMDGPU_PTE_TF;
flags = AMDGPU_PTE_SNOOPED | AMDGPU_PTE_PRT;
value = 0;
} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
/* Redirect the access to the dummy page */
@ -2548,6 +2584,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
unsigned int total_done_objs = 0;
unsigned int id = 0;

spin_lock(&vm->status_lock);
seq_puts(m, "\tIdle BOs:\n");
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
@ -2585,7 +2622,6 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
id = 0;

seq_puts(m, "\tInvalidated BOs:\n");
spin_lock(&vm->invalidated_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
@ -2600,7 +2636,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
continue;
total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
}
spin_unlock(&vm->invalidated_lock);
spin_unlock(&vm->status_lock);
total_done_objs = id;

seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,

@ -254,6 +254,9 @@ struct amdgpu_vm {
bool evicting;
unsigned int saved_flags;

/* Lock to protect vm_bo add/del/move on all lists of vm */
spinlock_t status_lock;

/* BOs which need a validation */
struct list_head evicted;

@ -268,7 +271,6 @@ struct amdgpu_vm {

/* regular invalidated BOs, but not yet updated in the PT */
struct list_head invalidated;
spinlock_t invalidated_lock;

/* BO mappings freed, but not yet updated in the PT */
struct list_head freed;
@ -276,6 +278,10 @@ struct amdgpu_vm {
/* BOs which are invalidated and have been updated in the PTs */
struct list_head done;

/* PT BOs scheduled to be freed and filled with zero if vm_resv is not held */
struct list_head pt_freed;
struct work_struct pt_free_work;

/* contains the page directory */
struct amdgpu_vm_bo_base root;
struct dma_fence *last_update;
@ -471,6 +477,7 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags);
void amdgpu_vm_pt_free_work(struct work_struct *work);

#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);

@ -637,10 +637,34 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
}
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
entry->bo->vm_bo = NULL;

spin_lock(&entry->vm->status_lock);
list_del(&entry->vm_status);
spin_unlock(&entry->vm->status_lock);
amdgpu_bo_unref(&entry->bo);
}

void amdgpu_vm_pt_free_work(struct work_struct *work)
{
struct amdgpu_vm_bo_base *entry, *next;
struct amdgpu_vm *vm;
LIST_HEAD(pt_freed);

vm = container_of(work, struct amdgpu_vm, pt_free_work);

spin_lock(&vm->status_lock);
list_splice_init(&vm->pt_freed, &pt_freed);
spin_unlock(&vm->status_lock);

/* flush_work in amdgpu_vm_fini ensures vm->root.bo is valid. */
amdgpu_bo_reserve(vm->root.bo, true);

list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
amdgpu_vm_pt_free(entry);

amdgpu_bo_unreserve(vm->root.bo);
}

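amdgpu_vm_pt_free_work above uses the classic splice-then-free shape: the shared list is emptied in O(1) under status_lock and the actual freeing runs with the lock dropped. A minimal sketch of that shape, assuming kfree()-able items (hypothetical types):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct owner {
	spinlock_t lock;
	struct list_head deferred;	/* items queued for freeing */
	struct work_struct free_work;
};

struct item { struct list_head node; };

static void free_work_fn(struct work_struct *work)
{
	struct owner *o = container_of(work, struct owner, free_work);
	struct item *it, *next;
	LIST_HEAD(local);

	/* take the whole list in one step, then free without the lock */
	spin_lock(&o->lock);
	list_splice_init(&o->deferred, &local);
	spin_unlock(&o->lock);

	list_for_each_entry_safe(it, next, &local, node)
		kfree(it);
}

As the amdgpu_vm_fini hunk above shows, teardown must flush_work() the work item before the structures it touches go away.
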
/**
* amdgpu_vm_pt_free_dfs - free PD/PT levels
*
@ -652,11 +676,24 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
*/
static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_vm_pt_cursor *start)
struct amdgpu_vm_pt_cursor *start,
bool unlocked)
{
struct amdgpu_vm_pt_cursor cursor;
struct amdgpu_vm_bo_base *entry;

if (unlocked) {
spin_lock(&vm->status_lock);
for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
list_move(&entry->vm_status, &vm->pt_freed);

if (start)
list_move(&start->entry->vm_status, &vm->pt_freed);
spin_unlock(&vm->status_lock);
schedule_work(&vm->pt_free_work);
return;
}

for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
amdgpu_vm_pt_free(entry);

@ -673,7 +710,7 @@ static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
*/
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
amdgpu_vm_pt_free_dfs(adev, vm, NULL);
amdgpu_vm_pt_free_dfs(adev, vm, NULL, false);
}

/**
@ -966,7 +1003,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
if (cursor.entry->bo) {
params->table_freed = true;
amdgpu_vm_pt_free_dfs(adev, params->vm,
&cursor);
&cursor,
params->unlocked);
}
amdgpu_vm_pt_next(adev, &cursor);
}

@ -212,12 +212,15 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
int r;

/* Wait for PD/PT moves to be completed */
dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
DMA_RESV_USAGE_KERNEL, fence) {
dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
r = amdgpu_sync_fence(&p->job->sync, fence);
if (r)
if (r) {
dma_resv_iter_end(&cursor);
return r;
}
}
dma_resv_iter_end(&cursor);

do {
ndw = p->num_dw_left;

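The hunk above switches from the locked iterator to the begin/unlocked/end triple, which may restart internally if the reservation object changes mid-walk and therefore needs an explicit dma_resv_iter_end() on every exit path. A minimal sketch of the pattern (the callback is a stand-in for amdgpu_sync_fence):

#include <linux/dma-resv.h>

/* Illustrative only: visit kernel-usage fences without holding the resv lock. */
static int for_each_kernel_fence(struct dma_resv *resv,
				 int (*fn)(struct dma_fence *fence, void *data),
				 void *data)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int r = 0;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_KERNEL);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		r = fn(fence, data);
		if (r)
			break;
	}
	/* always pair with iter_end so the iterator's fence reference is dropped */
	dma_resv_iter_end(&cursor);
	return r;
}
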
@ -406,14 +406,14 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
if (adev->reset_domain->type != XGMI_HIVE) {
hive->reset_domain =
amdgpu_reset_create_reset_domain(XGMI_HIVE, "amdgpu-reset-hive");
if (!hive->reset_domain) {
dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n");
ret = -ENOMEM;
kobject_put(&hive->kobj);
kfree(hive);
hive = NULL;
goto pro_end;
}
if (!hive->reset_domain) {
dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n");
ret = -ENOMEM;
kobject_put(&hive->kobj);
kfree(hive);
hive = NULL;
goto pro_end;
}
} else {
amdgpu_reset_get_reset_domain(adev->reset_domain);
hive->reset_domain = adev->reset_domain;

@ -43,6 +43,7 @@ struct amdgpu_hive_info {
} pstate;

struct amdgpu_reset_domain *reset_domain;
uint32_t device_remove_count;
};

struct amdgpu_pcs_ras_field {

@ -475,8 +475,13 @@ static void gfx_v11_0_init_rlcp_rlcv_microcode(struct amdgpu_device *adev)
const struct rlc_firmware_header_v2_3 *rlc_hdr;

rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
adev->gfx.rlcp_ucode_version = le32_to_cpu(rlc_hdr->rlcp_ucode_version);
adev->gfx.rlcp_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcp_ucode_feature_version);
adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);

adev->gfx.rlcv_ucode_version = le32_to_cpu(rlc_hdr->rlcv_ucode_version);
adev->gfx.rlcv_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcv_ucode_feature_version);
adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
}
@ -1103,10 +1103,13 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
*flags |= AMDGPU_PDE_BFS(0x9);

} else if (level == AMDGPU_VM_PDB0) {
if (*flags & AMDGPU_PDE_PTE)
if (*flags & AMDGPU_PDE_PTE) {
*flags &= ~AMDGPU_PDE_PTE;
else
if (!(*flags & AMDGPU_PTE_VALID))
*addr |= 1 << PAGE_SHIFT;
} else {
*flags |= AMDGPU_PTE_TF;
}
}
}

@ -1761,21 +1761,23 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
struct amdgpu_job *job)
{
struct drm_gpu_scheduler **scheds;

/* The create msg must be in the first IB submitted */
if (atomic_read(&p->entity->fence_seq))
if (atomic_read(&job->base.entity->fence_seq))
return -EINVAL;

scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
[AMDGPU_RING_PRIO_DEFAULT].sched;
drm_sched_entity_modify_sched(p->entity, scheds, 1);
drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
return 0;
}

static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
uint64_t addr)
{
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo_va_mapping *map;
@ -1846,7 +1848,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
continue;

r = vcn_v3_0_limit_sched(p);
r = vcn_v3_0_limit_sched(p, job);
if (r)
goto out;
}
@ -1860,7 +1862,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
{
struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
struct amdgpu_ring *ring = amdgpu_job_ring(job);
uint32_t msg_lo = 0, msg_hi = 0;
unsigned i;
int r;
@ -1879,7 +1881,8 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
msg_hi = val;
} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
val == 0) {
r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
r = vcn_v3_0_dec_msg(p, job,
((u64)msg_hi) << 32 | msg_lo);
if (r)
return r;
}

@ -1591,21 +1591,23 @@ static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}

static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p)
static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
struct amdgpu_job *job)
{
struct drm_gpu_scheduler **scheds;

/* The create msg must be in the first IB submitted */
if (atomic_read(&p->entity->fence_seq))
if (atomic_read(&job->base.entity->fence_seq))
return -EINVAL;

scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
[AMDGPU_RING_PRIO_0].sched;
drm_sched_entity_modify_sched(p->entity, scheds, 1);
drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
return 0;
}

static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
uint64_t addr)
{
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo_va_mapping *map;
@ -1676,7 +1678,7 @@ static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
continue;

r = vcn_v4_0_limit_sched(p);
r = vcn_v4_0_limit_sched(p, job);
if (r)
goto out;
}
@ -1689,32 +1691,34 @@ out:
#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)

static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
struct amdgpu_job *job,
struct amdgpu_ib *ib)
{
struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
struct amdgpu_ring *ring = amdgpu_job_ring(job);
struct amdgpu_vcn_decode_buffer *decode_buffer;
uint64_t addr;
uint32_t val;
int r = 0;

/* The first instance can decode anything */
if (!ring->me)
return r;
return 0;

/* unified queue ib header has 8 double words. */
if (ib->length_dw < 8)
return r;
return 0;

val = amdgpu_ib_get_value(ib, 6); //RADEON_VCN_ENGINE_TYPE
if (val != RADEON_VCN_ENGINE_TYPE_DECODE)
return 0;

if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[10];
decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[10];

if (decode_buffer->valid_buf_flag & 0x1)
r = vcn_v4_0_dec_msg(p, ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
decode_buffer->msg_buffer_address_lo);
}
return r;
if (!(decode_buffer->valid_buf_flag & 0x1))
return 0;

addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
decode_buffer->msg_buffer_address_lo;
return vcn_v4_0_dec_msg(p, job, addr);
}

static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {

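The rewrite above flattens the nested engine-type branch into guard clauses and assembles the 64-bit message address from its two 32-bit halves. A standalone sketch of both moves (the field names echo the decode buffer, but the struct here is hypothetical):

#include <stdint.h>
#include <stdio.h>

struct decode_buffer {
	uint32_t valid_buf_flag;
	uint32_t msg_buffer_address_hi;
	uint32_t msg_buffer_address_lo;
};

static uint64_t msg_addr(const struct decode_buffer *db)
{
	/* widen before shifting so the high half is not lost */
	return ((uint64_t)db->msg_buffer_address_hi << 32) |
	       db->msg_buffer_address_lo;
}

int main(void)
{
	struct decode_buffer db = {
		.valid_buf_flag = 0x1,
		.msg_buffer_address_hi = 0x1,
		.msg_buffer_address_lo = 0x2000,
	};

	if (!(db.valid_buf_flag & 0x1))
		return 0;	/* guard clause: no message buffer present */
	printf("msg at 0x%llx\n", (unsigned long long)msg_addr(&db));
	return 0;
}
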
@ -898,7 +898,7 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
pr_debug("addr 0x%lx of process mm is detroyed\n", addr);
pr_debug("addr 0x%lx of process mm is destroyed\n", addr);
return VM_FAULT_SIGBUS;
}

@ -177,14 +177,6 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
return r;
}

static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
queue_id, p->doorbell_off);
}

static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
@ -256,31 +248,6 @@ static uint32_t read_doorbell_id(void *mqd)
return m->queue_doorbell_id0;
}

static int destroy_mqd(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_destroy
(mm->dev->adev, mqd, type, timeout,
pipe_id, queue_id);
}

static void free_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}

static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_is_occupied(
mm->dev->adev, queue_address,
pipe_id, queue_id);
}

static int get_wave_state(struct mqd_manager *mm, void *mqd,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
@ -349,15 +316,6 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
mm->update_mqd(mm, m, q, NULL);
}

static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
(uint32_t __user *)p->write_ptr,
mms);
}

#define SDMA_RLC_DUMMY_DEFAULT 0xf

static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
@ -389,25 +347,6 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
q->is_active = QUEUE_IS_ACTIVE(*q);
}

/*
* preempt type here is ignored because there is only one way
* to preempt sdma queue
*/
static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
}

static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}

#if defined(CONFIG_DEBUG_FS)

static int debugfs_show_mqd(struct seq_file *m, void *data)
@ -445,11 +384,11 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
pr_debug("%s@%i\n", __func__, __LINE__);
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
mqd->free_mqd = free_mqd;
mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
mqd->get_wave_state = get_wave_state;
#if defined(CONFIG_DEBUG_FS)
@ -462,10 +401,10 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
mqd->load_mqd = hiq_load_mqd_kiq;
mqd->load_mqd = kfd_hiq_load_mqd_kiq;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
@ -476,11 +415,11 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
case KFD_MQD_TYPE_DIQ:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd;
mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
@ -491,10 +430,10 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->allocate_mqd = allocate_sdma_mqd;
mqd->init_mqd = init_mqd_sdma;
mqd->free_mqd = free_mqd_hiq_sdma;
mqd->load_mqd = load_mqd_sdma;
mqd->load_mqd = kfd_load_mqd_sdma;
mqd->update_mqd = update_mqd_sdma;
mqd->destroy_mqd = destroy_mqd_sdma;
mqd->is_occupied = is_occupied_sdma;
mqd->destroy_mqd = kfd_destroy_mqd_sdma;
mqd->is_occupied = kfd_is_occupied_sdma;
mqd->mqd_size = sizeof(struct v11_sdma_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;

@ -4748,7 +4748,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->visible = true;
plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

plane_info->layer_index = 0;
plane_info->layer_index = plane_state->normalized_zpos;

ret = fill_plane_color_attributes(plane_state, plane_info->format,
&plane_info->color_space);
@ -4816,7 +4816,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->global_alpha = plane_info.global_alpha;
dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
dc_plane_state->dcc = plane_info.dcc;
dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
dc_plane_state->layer_index = plane_info.layer_index;
dc_plane_state->flip_int_enabled = true;

/*
@ -9469,6 +9469,14 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}

/*
* DC consults the zpos (layer_index in DC terminology) to determine the
* hw plane on which to enable the hw cursor (see
* `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
* atomic state, so call drm helper to normalize zpos.
*/
drm_atomic_normalize_zpos(dev, state);

/* Remove existing planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
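Per the comment above, DC now reads the zpos that the DRM helper normalizes for every plane in the atomic state. A hedged sketch of the call-then-consume order (the range check is illustrative, not DC's):

#include <drm/drm_atomic.h>
#include <drm/drm_blend.h>

static int check_plane_zpos(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state;
	struct drm_plane *plane;
	int i, ret;

	/* fills new_state->normalized_zpos for every plane in @state */
	ret = drm_atomic_normalize_zpos(dev, state);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, new_state, i) {
		/* a driver would map this onto its hw layer index here */
		if (new_state->normalized_zpos >= 64)
			return -EINVAL;
	}
	return 0;
}
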
@ -880,8 +880,17 @@ void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigne

void dm_helpers_init_panel_settings(
struct dc_context *ctx,
struct dc_panel_config *panel_config)
struct dc_panel_config *panel_config,
struct dc_sink *sink)
{
// Extra Panel Power Sequence
panel_config->pps.extra_t3_ms = sink->edid_caps.panel_patch.extra_t3_ms;
panel_config->pps.extra_t7_ms = sink->edid_caps.panel_patch.extra_t7_ms;
panel_config->pps.extra_delay_backlight_off = sink->edid_caps.panel_patch.extra_delay_backlight_off;
panel_config->pps.extra_post_t7_ms = 0;
panel_config->pps.extra_pre_t11_ms = 0;
panel_config->pps.extra_t12_ms = sink->edid_caps.panel_patch.extra_t12_ms;
panel_config->pps.extra_post_OUI_ms = 0;
// Feature DSC
panel_config->dsc.disable_dsc_edp = false;
panel_config->dsc.force_dsc_edp_policy = 0;

@ -850,7 +850,7 @@ static enum bp_result get_ss_info_v4_1(
if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;

DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI: %d\n", ss_info->spread_spectrum_percentage);
DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_HDMI:
ss_info->spread_spectrum_percentage =
@ -860,7 +860,7 @@ static enum bp_result get_ss_info_v4_1(
if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;

DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI: %d\n", ss_info->spread_spectrum_percentage);
DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
/* TODO LVDS not support anymore? */
case AS_SIGNAL_TYPE_DISPLAY_PORT:
@ -871,7 +871,7 @@ static enum bp_result get_ss_info_v4_1(
if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;

DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT: %d\n", ss_info->spread_spectrum_percentage);
DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
/* atom_firmware: DAL only get data from dce_info table.
@ -885,7 +885,7 @@ static enum bp_result get_ss_info_v4_1(
DATA_TABLES(smu_info));
if (!smu_info)
return BP_RESULT_BADBIOSTABLE;

DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info->gpuclk_ss_percentage);
ss_info->spread_spectrum_percentage =
smu_info->waflclk_ss_percentage;
ss_info->spread_spectrum_range =
@ -893,7 +893,7 @@ static enum bp_result get_ss_info_v4_1(
if (smu_info->waflclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;

DC_LOG_BIOS("AS_SIGNAL_TYPE_XGMI: %d\n", ss_info->spread_spectrum_percentage);
DC_LOG_BIOS("AS_SIGNAL_TYPE_XGMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
default:
result = BP_RESULT_UNSUPPORTED;
@ -930,6 +930,7 @@ static enum bp_result get_ss_info_v4_2(
if (!smu_info)
return BP_RESULT_BADBIOSTABLE;

DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info->gpuclk_ss_percentage);
ss_info->type.STEP_AND_DELAY_INFO = false;
ss_info->spread_percentage_divider = 1000;
/* BIOS no longer uses target clock. Always enable for now */
@ -944,7 +945,7 @@ static enum bp_result get_ss_info_v4_2(
if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;

DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI: %d\n", ss_info->spread_spectrum_percentage);
DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_HDMI:
ss_info->spread_spectrum_percentage =
@ -954,7 +955,7 @@ static enum bp_result get_ss_info_v4_2(
if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;

DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI: %d\n", ss_info->spread_spectrum_percentage);
DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
/* TODO LVDS not support anymore? */
case AS_SIGNAL_TYPE_DISPLAY_PORT:
@ -965,7 +966,7 @@ static enum bp_result get_ss_info_v4_2(
if (smu_info->gpuclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;

DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT: %d\n", ss_info->spread_spectrum_percentage);
DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
/* atom_firmware: DAL only get data from dce_info table.
@ -1015,7 +1016,7 @@ static enum bp_result get_ss_info_v4_5(
if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;

DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI: %d\n", ss_info->spread_spectrum_percentage);
DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_HDMI:
ss_info->spread_spectrum_percentage =
@ -1025,7 +1026,7 @@ static enum bp_result get_ss_info_v4_5(
if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;

DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI: %d\n", ss_info->spread_spectrum_percentage);
DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_DISPLAY_PORT:
ss_info->spread_spectrum_percentage =
@ -1035,7 +1036,7 @@ static enum bp_result get_ss_info_v4_5(
if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;

DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT: %d\n", ss_info->spread_spectrum_percentage);
DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
/* atom_smu_info_v4_0 does not have fields for SS for SMU Display PLL anymore.
@ -1860,7 +1861,7 @@ static enum bp_result get_firmware_info_v3_2(
/* Vega12 */
smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
DATA_TABLES(smu_info));

DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
if (!smu_info_v3_2)
return BP_RESULT_BADBIOSTABLE;

@ -1869,7 +1870,7 @@ static enum bp_result get_firmware_info_v3_2(
/* Vega20 */
smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
DATA_TABLES(smu_info));

DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
if (!smu_info_v3_3)
return BP_RESULT_BADBIOSTABLE;

@ -2011,7 +2012,7 @@ static enum bp_result get_firmware_info_v3_4(

if (!smu_info_v3_5)
return BP_RESULT_BADBIOSTABLE;

DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_5->gpuclk_ss_percentage);
info->default_engine_clk = smu_info_v3_5->bootup_dcefclk_10khz * 10;
break;

@ -2417,6 +2418,7 @@ static enum bp_result get_integrated_info_v11(
info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
DATA_TABLES(integratedsysteminfo));

DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
if (info_v11 == NULL)
return BP_RESULT_BADBIOSTABLE;

@ -2631,6 +2633,7 @@ static enum bp_result get_integrated_info_v2_1(

info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1,
DATA_TABLES(integratedsysteminfo));
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);

if (info_v2_1 == NULL)
return BP_RESULT_BADBIOSTABLE;
@ -2792,6 +2795,8 @@ static enum bp_result get_integrated_info_v2_2(
info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2,
DATA_TABLES(integratedsysteminfo));

DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);

if (info_v2_2 == NULL)
return BP_RESULT_BADBIOSTABLE;

@ -2943,6 +2948,27 @@ static enum bp_result construct_integrated_info(
default:
return result;
}
if (result == BP_RESULT_OK) {

DC_LOG_BIOS("edp1:\n"
"\tedp_pwr_on_off_delay = %d\n"
"\tedp_pwr_on_vary_bl_to_blon = %d\n"
"\tedp_pwr_down_bloff_to_vary_bloff = %d\n"
"\tedp_bootup_bl_level = %d\n",
info->edp1_info.edp_pwr_on_off_delay,
info->edp1_info.edp_pwr_on_vary_bl_to_blon,
info->edp1_info.edp_pwr_down_bloff_to_vary_bloff,
info->edp1_info.edp_bootup_bl_level);
DC_LOG_BIOS("edp2:\n"
"\tedp_pwr_on_off_delayv = %d\n"
"\tedp_pwr_on_vary_bl_to_blon = %d\n"
"\tedp_pwr_down_bloff_to_vary_bloff = %d\n"
"\tedp_bootup_bl_level = %d\n",
info->edp2_info.edp_pwr_on_off_delay,
info->edp2_info.edp_pwr_on_vary_bl_to_blon,
info->edp2_info.edp_pwr_down_bloff_to_vary_bloff,
info->edp2_info.edp_bootup_bl_level);
}
}

if (result != BP_RESULT_OK)
@ -3320,6 +3346,7 @@ static enum bp_result bios_get_board_layout_info(
struct bios_parser *bp;

static enum bp_result record_result;
unsigned int max_slots;

const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
@ -3336,8 +3363,14 @@ static enum bp_result bios_get_board_layout_info(
}

board_layout_info->num_of_slots = 0;
max_slots = MAX_BOARD_SLOTS;

for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
// Assume single slot on v1_5
if (bp->object_info_tbl.revision.minor == 5) {
max_slots = 1;
}

for (i = 0; i < max_slots; ++i) {
record_result = get_bracket_layout_record(dcb,
slot_index_to_vbios_id[i],
&board_layout_info->slots[i]);

@ -104,7 +104,7 @@ static int dcn31_get_active_display_cnt_wa(
return display_count;
}

static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@ -115,9 +115,10 @@ static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
if (disable)
if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
else
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@ -216,11 +217,11 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
}

if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
dcn31_disable_otg_wa(clk_mgr_base, true);
dcn31_disable_otg_wa(clk_mgr_base, context, true);

clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn31_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
dcn31_disable_otg_wa(clk_mgr_base, false);
dcn31_disable_otg_wa(clk_mgr_base, context, false);

update_dispclk = true;
}

@ -126,7 +126,7 @@ static int dcn314_get_active_display_cnt_wa(
return display_count;
}

static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@ -136,11 +136,11 @@ static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)

if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
if (disable)
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
else
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@ -240,11 +240,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
}

if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
dcn314_disable_otg_wa(clk_mgr_base, true);
dcn314_disable_otg_wa(clk_mgr_base, context, true);

clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
dcn314_disable_otg_wa(clk_mgr_base, false);
dcn314_disable_otg_wa(clk_mgr_base, context, false);

update_dispclk = true;
}

@ -51,6 +51,9 @@
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)

#define UNSUPPORTED_DCFCLK 10000000
#define MIN_DPP_DISP_CLK 100000

static int dcn315_get_active_display_cnt_wa(
struct dc *dc,
struct dc_state *context)
@ -84,7 +87,7 @@ static int dcn315_get_active_display_cnt_wa(
return display_count;
}

static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@ -96,9 +99,10 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
if (disable)
if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
else
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@ -151,6 +155,9 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
}
}

/* Lock pstate by requesting unsupported dcfclk if change is unsupported */
if (!new_clocks->p_state_change_support)
new_clocks->dcfclk_khz = UNSUPPORTED_DCFCLK;
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
dcn315_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
@ -164,10 +171,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,

// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
if (new_clocks->dppclk_khz < 100000)
new_clocks->dppclk_khz = 100000;
if (new_clocks->dispclk_khz < 100000)
new_clocks->dispclk_khz = 100000;
if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK)
new_clocks->dppclk_khz = MIN_DPP_DISP_CLK;
if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK)
new_clocks->dispclk_khz = MIN_DPP_DISP_CLK;
}

if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
@ -180,12 +187,12 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
/* No need to apply the w/a if we haven't taken over from bios yet */
if (clk_mgr_base->clks.dispclk_khz)
dcn315_disable_otg_wa(clk_mgr_base, true);
dcn315_disable_otg_wa(clk_mgr_base, context, true);

clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
if (clk_mgr_base->clks.dispclk_khz)
dcn315_disable_otg_wa(clk_mgr_base, false);
dcn315_disable_otg_wa(clk_mgr_base, context, false);

update_dispclk = true;
}
@ -280,7 +287,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 64.0,
.pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@ -288,7 +295,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 64.0,
.pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@ -296,7 +303,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 64.0,
.pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@ -304,7 +311,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 64.0,
.pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@ -561,8 +568,7 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
if (!bw_params->num_channels)
bw_params->num_channels = 2;
bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;

for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
@ -112,7 +112,7 @@ static int dcn316_get_active_display_cnt_wa(
|
||||
return display_count;
|
||||
}
|
||||
|
||||
static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
|
||||
static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
|
||||
{
|
||||
struct dc *dc = clk_mgr_base->ctx->dc;
|
||||
int i;
|
||||
@ -124,9 +124,10 @@ static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
|
||||
continue;
|
||||
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
|
||||
dc_is_virtual_signal(pipe->stream->signal))) {
|
||||
if (disable)
|
||||
if (disable) {
|
||||
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
|
||||
else
|
||||
reset_sync_context_for_pipe(dc, context, i);
|
||||
} else
|
||||
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
|
||||
}
|
||||
}
|
||||
@ -221,11 +222,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
|
||||
}
|
||||
|
||||
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
|
||||
dcn316_disable_otg_wa(clk_mgr_base, true);
|
||||
dcn316_disable_otg_wa(clk_mgr_base, context, true);
|
||||
|
||||
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
|
||||
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
|
||||
dcn316_disable_otg_wa(clk_mgr_base, false);
|
||||
dcn316_disable_otg_wa(clk_mgr_base, context, false);
|
||||
|
||||
update_dispclk = true;
|
||||
}
|
||||
|
@ -1184,11 +1184,7 @@ static void disable_vbios_mode_if_required(
|
||||
pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
|
||||
|
||||
if (pix_clk_100hz != requested_pix_clk_100hz) {
|
||||
if (dc->hwss.update_phy_state)
|
||||
dc->hwss.update_phy_state(dc->current_state,
|
||||
pipe, TX_OFF_SYMCLK_OFF);
|
||||
else
|
||||
core_link_disable_stream(pipe);
|
||||
core_link_disable_stream(pipe);
|
||||
pipe->stream->dpms_off = false;
|
||||
}
|
||||
}
|
||||
@ -3061,11 +3057,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
|
||||
|
||||
if (stream_update->dpms_off) {
|
||||
if (*stream_update->dpms_off) {
|
||||
if (dc->hwss.update_phy_state)
|
||||
dc->hwss.update_phy_state(dc->current_state,
|
||||
pipe_ctx, TX_OFF_SYMCLK_ON);
|
||||
else
|
||||
core_link_disable_stream(pipe_ctx);
|
||||
core_link_disable_stream(pipe_ctx);
|
||||
/* for dpms, keep acquired resources*/
|
||||
if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
|
||||
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
|
||||
@ -3075,12 +3067,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
|
||||
} else {
|
||||
if (get_seamless_boot_stream_count(context) == 0)
|
||||
dc->hwss.prepare_bandwidth(dc, dc->current_state);
|
||||
|
||||
if (dc->hwss.update_phy_state)
|
||||
dc->hwss.update_phy_state(dc->current_state,
|
||||
pipe_ctx, TX_ON_SYMCLK_ON);
|
||||
else
|
||||
core_link_enable_stream(dc->current_state, pipe_ctx);
|
||||
core_link_enable_stream(dc->current_state, pipe_ctx);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -848,20 +848,13 @@ static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason
|
||||
|
||||
bool reset_cur_dp_mst_topology(struct dc_link *link)
|
||||
{
|
||||
bool result = false;
|
||||
DC_LOGGER_INIT(link->ctx->logger);
|
||||
|
||||
LINK_INFO("link=%d, mst branch is now Disconnected\n",
|
||||
link->link_index);
|
||||
|
||||
revert_dpia_mst_dsc_always_on_wa(link);
|
||||
result = dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
|
||||
|
||||
link->mst_stream_alloc_table.stream_count = 0;
|
||||
memset(link->mst_stream_alloc_table.stream_allocations,
|
||||
0,
|
||||
sizeof(link->mst_stream_alloc_table.stream_allocations));
|
||||
return result;
|
||||
return dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
|
||||
}
|
||||
|
||||
static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc,
|
||||
@ -1315,7 +1308,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
|
||||
|
||||
if (link->connector_signal == SIGNAL_TYPE_EDP) {
|
||||
// Init dc_panel_config
|
||||
dm_helpers_init_panel_settings(dc_ctx, &link->panel_config);
|
||||
dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
|
||||
// Override dc_panel_config if system has specific settings
|
||||
dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
|
||||
}
|
||||
@ -1984,7 +1977,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
|
||||
int i;
|
||||
bool apply_seamless_boot_optimization = false;
|
||||
uint32_t bl_oled_enable_delay = 50; // in ms
|
||||
const uint32_t post_oui_delay = 30; // 30ms
|
||||
uint32_t post_oui_delay = 30; // 30ms
|
||||
/* Reduce link bandwidth between failed link training attempts. */
|
||||
bool do_fallback = false;
|
||||
|
||||
@ -2031,8 +2024,10 @@ static enum dc_status enable_link_dp(struct dc_state *state,
|
||||
|
||||
// during mode switch we do DP_SET_POWER off then on, and OUI is lost
|
||||
dpcd_set_source_specific_data(link);
|
||||
if (link->dpcd_sink_ext_caps.raw != 0)
|
||||
if (link->dpcd_sink_ext_caps.raw != 0) {
|
||||
post_oui_delay += link->panel_config.pps.extra_post_OUI_ms;
|
||||
msleep(post_oui_delay);
|
||||
}
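
The post-OUI hunk follows a pattern this series applies to several panel delays: start from a fixed base, add the per-panel extra from dc_panel_config, and sleep once. A minimal kernel-style sketch (the function name and parameter are illustrative):

#include <linux/delay.h>

static void wait_after_oui_write(unsigned int extra_post_oui_ms)
{
	unsigned int post_oui_delay = 30;	/* base delay in ms, as in the hunk */

	post_oui_delay += extra_post_oui_ms;	/* panel_config.pps.extra_post_OUI_ms */
	msleep(post_oui_delay);
}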

// similarly, mode switch can cause loss of cable ID
dpcd_write_cable_id_to_dprx(link);
@ -2644,9 +2639,8 @@ static void disable_link(struct dc_link *link, const struct link_resource *link_
dp_set_fec_ready(link, link_res, false);
}
}
} else {
if (signal != SIGNAL_TYPE_VIRTUAL)
link->link_enc->funcs->disable_output(link->link_enc, signal);
} else if (signal != SIGNAL_TYPE_VIRTUAL) {
link->dc->hwss.disable_link_output(link, link_res, signal);
}

if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
@ -2668,6 +2662,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
bool is_over_340mhz = false;
bool is_vga_mode = (stream->timing.h_addressable == 640)
&& (stream->timing.v_addressable == 480);
struct dc *dc = pipe_ctx->stream->ctx->dc;

if (stream->phy_pix_clk == 0)
stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
@ -2707,11 +2702,12 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
display_color_depth = COLOR_DEPTH_888;

link->link_enc->funcs->enable_tmds_output(
link->link_enc,
dc->hwss.enable_tmds_link_output(
link,
&pipe_ctx->link_res,
pipe_ctx->stream->signal,
pipe_ctx->clock_source->id,
display_color_depth,
pipe_ctx->stream->signal,
stream->phy_pix_clk);

if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
@ -2722,15 +2718,16 @@ static void enable_link_lvds(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
struct dc *dc = stream->ctx->dc;

if (stream->phy_pix_clk == 0)
stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;

memset(&stream->link->cur_link_settings, 0,
sizeof(struct dc_link_settings));

link->link_enc->funcs->enable_lvds_output(
link->link_enc,
dc->hwss.enable_lvds_link_output(
link,
&pipe_ctx->link_res,
pipe_ctx->clock_source->id,
stream->phy_pix_clk);

@ -3568,6 +3565,35 @@ static void update_mst_stream_alloc_table(
work_table[i];
}

static void remove_stream_from_alloc_table(
struct dc_link *link,
struct stream_encoder *dio_stream_enc,
struct hpo_dp_stream_encoder *hpo_dp_stream_enc)
{
int i = 0;
struct link_mst_stream_allocation_table *table =
&link->mst_stream_alloc_table;

if (hpo_dp_stream_enc) {
for (; i < table->stream_count; i++)
if (hpo_dp_stream_enc == table->stream_allocations[i].hpo_dp_stream_enc)
break;
} else {
for (; i < table->stream_count; i++)
if (dio_stream_enc == table->stream_allocations[i].stream_enc)
break;
}

if (i < table->stream_count) {
i++;
for (; i < table->stream_count; i++)
table->stream_allocations[i-1] = table->stream_allocations[i];
memset(&table->stream_allocations[table->stream_count-1], 0,
sizeof(struct link_mst_stream_allocation));
table->stream_count--;
}
}
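
remove_stream_from_alloc_table() above is a textbook remove-and-compact on a fixed-size array: locate the matching slot, shift the tail down one place, zero the vacated last slot, shrink the count. The generic form, with illustrative types:

#include <string.h>

struct slot {
	void *enc;	/* stands in for the stream/HPO encoder pointer */
};

static void table_remove(struct slot *slots, int *count, const void *enc)
{
	int i;

	for (i = 0; i < *count; i++)
		if (slots[i].enc == enc)
			break;
	if (i == *count)
		return;				/* no matching entry */
	for (i++; i < *count; i++)
		slots[i - 1] = slots[i];	/* compact the tail */
	memset(&slots[*count - 1], 0, sizeof(slots[0]));
	(*count)--;
}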

static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp)
{
const uint32_t VCP_Y_PRECISION = 1000;
@ -3985,26 +4011,32 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
&empty_link_settings,
avg_time_slots_per_mtp);

/* TODO: which component is responsible for removing the payload table? */
if (mst_mode) {
/* when link is in mst mode, rely on mst manager to remove
* payload
*/
if (dm_helpers_dp_mst_write_payload_allocation_table(
stream->ctx,
stream,
&proposed_table,
false)) {
false))

update_mst_stream_alloc_table(
link,
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.hpo_dp_stream_enc,
&proposed_table);
}
else {
DC_LOG_WARNING("Failed to update"
"MST allocation table for"
"pipe idx:%d\n",
pipe_ctx->pipe_idx);
}
link,
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.hpo_dp_stream_enc,
&proposed_table);
else
DC_LOG_WARNING("Failed to update"
"MST allocation table for"
"pipe idx:%d\n",
pipe_ctx->pipe_idx);
} else {
/* when link is no longer in mst mode (mst hub unplugged),
* remove payload with default dc logic
*/
remove_stream_from_alloc_table(link, pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.hpo_dp_stream_enc);
}

DC_LOG_MST("%s"

@ -2758,8 +2758,14 @@ bool perform_link_training_with_retries(
skip_video_pattern);

/* Transmit idle pattern once training successful. */
if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) {
dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
/* Update verified link settings to current one
* because DPIA LT might fall back to a lower link setting.
*/
link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
}
} else {
status = dc_link_dp_perform_link_training(link,
&pipe_ctx->link_res,
@ -4518,25 +4524,15 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
if (link->dc->hwss.update_phy_state)
link->dc->hwss.update_phy_state(link->dc->current_state,
pipe_ctx, TX_OFF_SYMCLK_OFF);
else
core_link_disable_stream(pipe_ctx);
}
pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_disable_stream(pipe_ctx);
}

for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
if (link->dc->hwss.update_phy_state)
link->dc->hwss.update_phy_state(link->dc->current_state,
pipe_ctx, TX_ON_SYMCLK_ON);
else
core_link_enable_stream(link->dc->current_state, pipe_ctx);
}
pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_enable_stream(link->dc->current_state, pipe_ctx);
}
}
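
After the simplification above, link-loss handling is two symmetric passes over the pipe array: disable every affected stream, then re-enable them, with the per-pipe phy_state branching gone. A reduced sketch (types and callbacks are stand-ins, not the driver's interfaces):

struct loss_pipe {
	int has_stream;
	int dpms_off;
	int on_lost_link;
};

static void handle_link_loss(struct loss_pipe *pipes, int n,
			     void (*disable)(struct loss_pipe *),
			     void (*enable)(struct loss_pipe *))
{
	int i;

	for (i = 0; i < n; i++)
		if (pipes[i].has_stream && !pipes[i].dpms_off && pipes[i].on_lost_link)
			disable(&pipes[i]);	/* core_link_disable_stream() */
	for (i = 0; i < n; i++)
		if (pipes[i].has_stream && !pipes[i].dpms_off && pipes[i].on_lost_link)
			enable(&pipes[i]);	/* core_link_enable_stream() */
}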

@ -5133,6 +5129,14 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];

/* If this chip cap is set, at least one retimer must exist in the chain.
* Override count to 1 if we receive a known bad count (0 or an invalid value) */
if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
(dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
ASSERT(0);
link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
}

/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
@ -7069,68 +7073,16 @@ void dp_enable_link_phy(
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
struct pipe_ctx *pipes =
link->dc->current_state->res_ctx.pipe_ctx;
struct clock_source *dp_cs =
link->dc->res_pool->dp_clock_source;
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
unsigned int i;

if (link->connector_signal == SIGNAL_TYPE_EDP) {
if (!link->dc->config.edp_no_power_sequencing)
link->dc->hwss.edp_power_control(link, true);
link->dc->hwss.edp_wait_for_hpd_ready(link, true);
}

/* If the current pixel clock source is not DTO (happens after
* switching from HDMI passive dongle to DP on the same connector),
* switch the pixel clock source to DTO.
*/
for (i = 0; i < MAX_PIPES; i++) {
if (pipes[i].stream != NULL &&
pipes[i].stream->link == link) {
if (pipes[i].clock_source != NULL &&
pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
pipes[i].clock_source = dp_cs;
pipes[i].stream_res.pix_clk_params.requested_pix_clk_100hz =
pipes[i].stream->timing.pix_clk_100hz;
pipes[i].clock_source->funcs->program_pix_clk(
pipes[i].clock_source,
&pipes[i].stream_res.pix_clk_params,
dp_get_link_encoding_format(link_settings),
&pipes[i].pll_settings);
}
}
}

link->cur_link_settings = *link_settings;

if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
if (dc->clk_mgr->funcs->notify_link_rate_change)
dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
}

if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);

if (link_hwss->ext.enable_dp_link_output)
link_hwss->ext.enable_dp_link_output(link, link_res, signal,
clock_source, link_settings);

if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);

dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
link->dc->hwss.enable_dp_link_output(link, link_res, signal,
clock_source, link_settings);
dp_receiver_power_ctrl(link, true);
}

void edp_add_delay_for_T9(struct dc_link *link)
{
if (link->local_sink &&
link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000);
if (link && link->panel_config.pps.extra_delay_backlight_off > 0)
udelay(link->panel_config.pps.extra_delay_backlight_off * 1000);
}

bool edp_receiver_ready_T9(struct dc_link *link)
@ -7186,9 +7138,8 @@ bool edp_receiver_ready_T7(struct dc_link *link)
} while (time_taken_in_ns < 50 * 1000000); // Max T7 is 50ms
}

if (link->local_sink &&
link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
if (link && link->panel_config.pps.extra_t7_ms > 0)
udelay(link->panel_config.pps.extra_t7_ms * 1000);

return result;
}
@ -7197,29 +7148,11 @@ void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_
enum signal_type signal)
{
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);

if (!link->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(link, false);

if (signal == SIGNAL_TYPE_EDP) {
if (link->dc->hwss.edp_backlight_control)
link->dc->hwss.edp_backlight_control(link, false);
if (link_hwss->ext.disable_dp_link_output)
link_hwss->ext.disable_dp_link_output(link, link_res, signal);
link->dc->hwss.edp_power_control(link, false);
} else {
if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);
if (link_hwss->ext.disable_dp_link_output)
link_hwss->ext.disable_dp_link_output(link, link_res, signal);
if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
}

dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);

dc->hwss.disable_link_output(link, link_res, signal);
/* Clear current link setting. */
memset(&link->cur_link_settings, 0,
sizeof(link->cur_link_settings));

@ -3581,6 +3581,23 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
}
}

void reset_sync_context_for_pipe(const struct dc *dc,
struct dc_state *context,
uint8_t pipe_idx)
{
int i;
struct pipe_ctx *pipe_ctx_reset;

/* reset the otg sync context for the pipe and its slave pipes if any */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe_ctx_reset = &context->res_ctx.pipe_ctx[i];

if (((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_reset) == pipe_idx) &&
IS_PIPE_SYNCD_VALID(pipe_ctx_reset)) || (i == pipe_idx))
SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_reset, i);
}
}

uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter)
{
/* TODO - get transmitter to phy idx mapping from DMUB */
@ -3645,3 +3662,25 @@ const struct link_hwss *get_link_hwss(const struct dc_link *link,
else
return get_virtual_link_hwss();
}

bool is_h_timing_divisible_by_2(struct dc_stream_state *stream)
{
bool divisible = false;
uint16_t h_blank_start = 0;
uint16_t h_blank_end = 0;

if (stream) {
h_blank_start = stream->timing.h_total - stream->timing.h_front_porch;
h_blank_end = h_blank_start - stream->timing.h_addressable;

/* HTOTAL, Hblank start/end, and Hsync start/end all must be
* divisible by 2 in order for the horizontal timing params
* to be considered divisible by 2. Hsync start is always 0.
*/
divisible = (stream->timing.h_total % 2 == 0) &&
(h_blank_start % 2 == 0) &&
(h_blank_end % 2 == 0) &&
(stream->timing.h_sync_width % 2 == 0);
}
return divisible;
}
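
A worked example of is_h_timing_divisible_by_2() with a common 1080p timing (illustrative numbers): h_total = 2200, h_front_porch = 88, h_addressable = 1920, h_sync_width = 44 gives h_blank_start = 2200 - 88 = 2112 and h_blank_end = 2112 - 1920 = 192; all four checked values are even, so the timing qualifies. The same check, reduced to its inputs:

#include <stdbool.h>
#include <stdint.h>

static bool h_timing_divisible_by_2(uint16_t h_total, uint16_t h_front_porch,
				    uint16_t h_addressable, uint16_t h_sync_width)
{
	uint16_t h_blank_start = h_total - h_front_porch;
	uint16_t h_blank_end = h_blank_start - h_addressable;

	return (h_total % 2 == 0) && (h_blank_start % 2 == 0) &&
	       (h_blank_end % 2 == 0) && (h_sync_width % 2 == 0);
}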

@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;

#define DC_VER "3.2.202"
#define DC_VER "3.2.204"

#define MAX_SURFACES 3
#define MAX_PLANES 6
@ -832,6 +832,7 @@ struct dc_debug_options {
bool force_subvp_mclk_switch;
bool allow_sw_cursor_fallback;
unsigned int force_subvp_num_ways;
bool alloc_extra_way_for_cursor;
bool force_usr_allow;
/* uses value at boot and disables switch */
bool disable_dtb_ref_clk_switch;

@ -674,12 +674,28 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
pipe_data->pipe_config.subvp_data.processing_delay_lines =
div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));

if (subvp_pipe->bottom_pipe) {
pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->bottom_pipe->pipe_idx;
} else if (subvp_pipe->next_odm_pipe) {
pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
} else {
pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0;
}

// Find phantom pipe index based on phantom stream
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];

if (phantom_pipe->stream == subvp_pipe->stream->mall_stream_config.paired_stream) {
pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->pipe_idx;
if (phantom_pipe->bottom_pipe) {
pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->pipe_idx;
} else if (phantom_pipe->next_odm_pipe) {
pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->pipe_idx;
} else {
pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0;
}
break;
}
}
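
The processing_delay_lines expression at the top of the hunk is a round-up division in disguise: adding (divisor - 1) to the numerator before dividing computes ceil(delay_us * pix_clk_hz / (h_total * 1000000)), i.e. the firmware delay converted to whole lines. The idiom in isolation (div64_u64() is the kernel's 64-bit divide; this sketch uses plain C):

#include <stdint.h>

static uint64_t div_round_up_u64(uint64_t a, uint64_t b)
{
	return (a + b - 1) / b;	/* round-up division, as in the hunk */
}

/* e.g.: lines = div_round_up_u64(delay_us * pix_clk_hz,
 *                                (uint64_t)h_total * 1000000); */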

@ -724,7 +740,9 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
if (!pipe->stream)
continue;

if (pipe->plane_state && !pipe->top_pipe &&
/* For SubVP pipe count, only count the top most (ODM / MPC) pipe
*/
if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN)
subvp_pipes[subvp_count++] = pipe;
}
@ -737,7 +755,12 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
if (!pipe->stream)
continue;

/* When populating subvp cmd info, only pass in the top most (ODM / MPC) pipe.
* Any ODM or MPC splits being used in SubVP will be handled internally in
* populate_subvp_cmd_pipe_info
*/
if (pipe->plane_state && pipe->stream->mall_stream_config.paired_stream &&
!pipe->top_pipe && !pipe->prev_odm_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
} else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE) {

@ -85,6 +85,7 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0

bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca);

void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable);
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);

#endif /* _DMUB_DC_SRV_H_ */

@ -117,6 +117,16 @@ struct psr_settings {
* Add a struct dc_panel_config under dc_link
*/
struct dc_panel_config {
// extra panel power sequence parameters
struct pps {
unsigned int extra_t3_ms;
unsigned int extra_t7_ms;
unsigned int extra_delay_backlight_off;
unsigned int extra_post_t7_ms;
unsigned int extra_pre_t11_ms;
unsigned int extra_t12_ms;
unsigned int extra_post_OUI_ms;
} pps;
// edp DSC
struct dsc {
bool disable_dsc_edp;
@ -244,7 +254,7 @@ struct dc_link {
struct gpio *hpd_gpio;
enum dc_link_fec_state fec_state;
struct dc_panel_config panel_config;
enum phy_state phy_state;
struct phy_state phy_state;
};

const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);

@ -393,17 +393,18 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
if (copy_settings_data->dsc_enable_status &&
link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 &&
!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
sizeof(link->dpcd_caps.sink_dev_id_str)))
sizeof(DP_SINK_DEVICE_STR_ID_1)))
link->psr_settings.force_ffu_mode = 1;
else
link->psr_settings.force_ffu_mode = 0;
copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode;

if (link->fec_state == dc_link_fec_enabled &&
link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 &&
(!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
sizeof(link->dpcd_caps.sink_dev_id_str)) ||
sizeof(DP_SINK_DEVICE_STR_ID_1)) ||
!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2,
sizeof(link->dpcd_caps.sink_dev_id_str))))
sizeof(DP_SINK_DEVICE_STR_ID_2))))
copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 1;
else
copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 0;
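
The sizeof swap above fixes a classic memcmp pitfall: sink_dev_id_str is a fixed 6-byte field, while DP_SINK_DEVICE_STR_ID_1/2 are string constants that are presumably shorter, so bounding the compare by the field's size reads past the end of the constant. Bounding by the constant's own size is the safe form. An illustrative reduction:

#include <string.h>

struct caps_sketch {
	char sink_dev_id_str[6];
};

#define STR_ID_SKETCH "ab"	/* sizeof == 3 (includes the NUL), not 6 */

static int id_matches(const struct caps_sketch *c)
{
	/* was: memcmp(..., sizeof(c->sink_dev_id_str)) -- compares 6 bytes,
	 * reading 3 bytes past the end of the 3-byte constant */
	return !memcmp(c->sink_dev_id_str, STR_ID_SKETCH, sizeof(STR_ID_SKETCH));
}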

@ -722,7 +722,6 @@ void dce110_edp_wait_for_hpd_ready(
struct dc_context *ctx = link->ctx;
struct graphics_object_id connector = link->link_enc->connector;
struct gpio *hpd;
struct dc_sink *sink = link->local_sink;
bool edp_hpd_high = false;
uint32_t time_elapsed = 0;
uint32_t timeout = power_up ?
@ -755,9 +754,9 @@ void dce110_edp_wait_for_hpd_ready(
return;
}

if (sink != NULL) {
if (sink->edid_caps.panel_patch.extra_t3_ms > 0) {
int extra_t3_in_ms = sink->edid_caps.panel_patch.extra_t3_ms;
if (link != NULL) {
if (link->panel_config.pps.extra_t3_ms > 0) {
int extra_t3_in_ms = link->panel_config.pps.extra_t3_ms;

msleep(extra_t3_in_ms);
}
@ -842,7 +841,7 @@ void dce110_edp_power_control(
/* add time defined by a patch, if any (usually patch extra_t12_ms is 0) */
if (link->local_sink != NULL)
remaining_min_edp_poweroff_time_ms +=
link->local_sink->edid_caps.panel_patch.extra_t12_ms;
link->panel_config.pps.extra_t12_ms;

/* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time. */
if (dp_trace_get_edp_poweroff_timestamp(link) != 0) {
@ -946,7 +945,7 @@ void dce110_edp_wait_for_T12(
current_ts,
dp_trace_get_edp_poweroff_timestamp(link)), 1000000);

t12_duration += link->local_sink->edid_caps.panel_patch.extra_t12_ms; // Add extra T12
t12_duration += link->panel_config.pps.extra_t12_ms; // Add extra T12

if (time_since_edp_poweroff_ms < t12_duration)
msleep(t12_duration - time_since_edp_poweroff_ms);
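
The T12 hunks follow the same bookkeeping: the minimum panel power-off time, plus any per-panel extra (now read from panel_config.pps rather than the EDID patch), is compared with the time already elapsed since power-off, and only the remainder is slept. A kernel-style sketch with illustrative arguments:

#include <linux/delay.h>

static void wait_remaining_t12(unsigned long elapsed_since_poweroff_ms,
			       unsigned long t12_ms,
			       unsigned long extra_t12_ms)
{
	t12_ms += extra_t12_ms;		/* panel_config.pps.extra_t12_ms */
	if (elapsed_since_poweroff_ms < t12_ms)
		msleep(t12_ms - elapsed_since_poweroff_ms);
}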

@ -965,6 +964,8 @@ void dce110_edp_backlight_control(
struct dc_context *ctx = link->ctx;
struct bp_transmitter_control cntl = { 0 };
uint8_t panel_instance;
unsigned int pre_T11_delay = OLED_PRE_T11_DELAY;
unsigned int post_T7_delay = OLED_POST_T7_DELAY;

if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
!= CONNECTOR_ID_EDP) {
@ -1043,8 +1044,10 @@ void dce110_edp_backlight_control(

link_transmitter_control(ctx->dc_bios, &cntl);

if (enable && link->dpcd_sink_ext_caps.bits.oled)
msleep(OLED_POST_T7_DELAY);
if (enable && link->dpcd_sink_ext_caps.bits.oled) {
post_T7_delay += link->panel_config.pps.extra_post_t7_ms;
msleep(post_T7_delay);
}

if (link->dpcd_sink_ext_caps.bits.oled ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
@ -1066,8 +1069,10 @@ void dce110_edp_backlight_control(
DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
}

if (!enable && link->dpcd_sink_ext_caps.bits.oled)
msleep(OLED_PRE_T11_DELAY);
if (!enable && link->dpcd_sink_ext_caps.bits.oled) {
pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms;
msleep(pre_T11_delay);
}
}

void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
@ -1441,6 +1446,14 @@ static enum dc_status dce110_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}

if (dc_is_hdmi_tmds_signal(stream->signal)) {
stream->link->phy_state.symclk_ref_cnts.otg = 1;
if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
else
stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
}

pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
@ -1577,12 +1590,8 @@ static enum dc_status apply_single_controller_ctx_to_hw(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);

if (!stream->dpms_off) {
if (dc->hwss.update_phy_state)
dc->hwss.update_phy_state(context, pipe_ctx, TX_ON_SYMCLK_ON);
else
core_link_enable_stream(context, pipe_ctx);
}
if (!stream->dpms_off)
core_link_enable_stream(context, pipe_ctx);

/* DCN3.1 FPGA Workaround
* Need to enable HPO DP Stream Encoder before setting OTG master enable.
@ -2118,6 +2127,7 @@ static void dce110_reset_hw_ctx_wrap(
BREAK_TO_DEBUGGER();
}
pipe_ctx_old->stream_res.tg->funcs->disable_crtc(pipe_ctx_old->stream_res.tg);
pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0;
pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);

@ -2168,7 +2178,8 @@ static void dce110_setup_audio_dto(
continue;
if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
continue;
if (pipe_ctx->stream_res.audio != NULL) {
if (pipe_ctx->stream_res.audio != NULL &&
pipe_ctx->stream_res.audio->enabled == false) {
struct audio_output audio_output;

build_audio_output(context, pipe_ctx, &audio_output);
@ -2208,7 +2219,8 @@ static void dce110_setup_audio_dto(
if (!dc_is_dp_signal(pipe_ctx->stream->signal))
continue;

if (pipe_ctx->stream_res.audio != NULL) {
if (pipe_ctx->stream_res.audio != NULL &&
pipe_ctx->stream_res.audio->enabled == false) {
struct audio_output audio_output;

build_audio_output(context, pipe_ctx, &audio_output);
@ -2996,6 +3008,124 @@ void dce110_set_pipe(struct pipe_ctx *pipe_ctx)
abm->funcs->set_pipe(abm, otg_inst, panel_cntl->inst);
}

void dce110_enable_lvds_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum clock_source_id clock_source,
uint32_t pixel_clock)
{
link->link_enc->funcs->enable_lvds_output(
link->link_enc,
clock_source,
pixel_clock);
link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
}

void dce110_enable_tmds_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal,
enum clock_source_id clock_source,
enum dc_color_depth color_depth,
uint32_t pixel_clock)
{
link->link_enc->funcs->enable_tmds_output(
link->link_enc,
clock_source,
color_depth,
signal,
pixel_clock);
link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
}

void dce110_enable_dp_link_output(
struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
struct pipe_ctx *pipes =
link->dc->current_state->res_ctx.pipe_ctx;
struct clock_source *dp_cs =
link->dc->res_pool->dp_clock_source;
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
unsigned int i;

if (link->connector_signal == SIGNAL_TYPE_EDP) {
if (!link->dc->config.edp_no_power_sequencing)
link->dc->hwss.edp_power_control(link, true);
link->dc->hwss.edp_wait_for_hpd_ready(link, true);
}

/* If the current pixel clock source is not DTO (happens after
* switching from HDMI passive dongle to DP on the same connector),
* switch the pixel clock source to DTO.
*/

for (i = 0; i < MAX_PIPES; i++) {
if (pipes[i].stream != NULL &&
pipes[i].stream->link == link) {
if (pipes[i].clock_source != NULL &&
pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
pipes[i].clock_source = dp_cs;
pipes[i].stream_res.pix_clk_params.requested_pix_clk_100hz =
pipes[i].stream->timing.pix_clk_100hz;
pipes[i].clock_source->funcs->program_pix_clk(
pipes[i].clock_source,
&pipes[i].stream_res.pix_clk_params,
dp_get_link_encoding_format(link_settings),
&pipes[i].pll_settings);
}
}
}

if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
if (dc->clk_mgr->funcs->notify_link_rate_change)
dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
}

if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);

if (link_hwss->ext.enable_dp_link_output)
link_hwss->ext.enable_dp_link_output(link, link_res, signal,
clock_source, link_settings);

link->phy_state.symclk_state = SYMCLK_ON_TX_ON;

if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);

dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
}

void dce110_disable_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
struct dc *dc = link->ctx->dc;
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
struct dmcu *dmcu = dc->res_pool->dmcu;

if (signal == SIGNAL_TYPE_EDP &&
link->dc->hwss.edp_backlight_control)
link->dc->hwss.edp_backlight_control(link, false);
else if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);

link_hwss->disable_link_output(link, link_res, signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;

if (signal == SIGNAL_TYPE_EDP &&
link->dc->hwss.edp_backlight_control)
link->dc->hwss.edp_power_control(link, false);
else if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->unlock_phy(dmcu);
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
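
The new dce110_*_link_output helpers all end by recording the PHY's clock/transmitter state, which the enable_stream_timing hunks elsewhere in this series read back. A sketch of the bookkeeping only (the enum values mirror the diff; the struct is a reduction of the driver's phy_state):

enum symclk_state_sketch {
	SYMCLK_OFF_TX_OFF,
	SYMCLK_ON_TX_OFF,
	SYMCLK_ON_TX_ON,
};

struct phy_state_sketch {
	enum symclk_state_sketch symclk_state;
};

static void mark_link_output_enabled(struct phy_state_sketch *p)
{
	p->symclk_state = SYMCLK_ON_TX_ON;	/* end of every enable helper */
}

static void mark_link_output_disabled(struct phy_state_sketch *p)
{
	p->symclk_state = SYMCLK_OFF_TX_OFF;	/* end of the disable helper */
}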
|
||||
|
||||
static const struct hw_sequencer_funcs dce110_funcs = {
|
||||
.program_gamut_remap = program_gamut_remap,
|
||||
.program_output_csc = program_output_csc,
|
||||
@ -3035,6 +3165,10 @@ static const struct hw_sequencer_funcs dce110_funcs = {
|
||||
.set_backlight_level = dce110_set_backlight_level,
|
||||
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
|
||||
.set_pipe = dce110_set_pipe,
|
||||
.enable_lvds_link_output = dce110_enable_lvds_link_output,
|
||||
.enable_tmds_link_output = dce110_enable_tmds_link_output,
|
||||
.enable_dp_link_output = dce110_enable_dp_link_output,
|
||||
.disable_link_output = dce110_disable_link_output,
|
||||
};
|
||||
|
||||
static const struct hwseq_private_funcs dce110_private_funcs = {
|
||||
|
@ -90,6 +90,24 @@ bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
|
||||
uint32_t frame_ramp);
|
||||
void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
|
||||
void dce110_set_pipe(struct pipe_ctx *pipe_ctx);
|
||||
|
||||
void dce110_disable_link_output(struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
enum signal_type signal);
|
||||
void dce110_enable_lvds_link_output(struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
enum clock_source_id clock_source,
|
||||
uint32_t pixel_clock);
|
||||
void dce110_enable_tmds_link_output(struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
enum signal_type signal,
|
||||
enum clock_source_id clock_source,
|
||||
enum dc_color_depth color_depth,
|
||||
uint32_t pixel_clock);
|
||||
void dce110_enable_dp_link_output(
|
||||
struct dc_link *link,
|
||||
const struct link_resource *link_res,
|
||||
enum signal_type signal,
|
||||
enum clock_source_id clock_source,
|
||||
const struct dc_link_settings *link_settings);
|
||||
#endif /* __DC_HWSS_DCE110_H__ */
|
||||
|
||||
|
@ -899,6 +899,14 @@ enum dc_status dcn10_enable_stream_timing(
|
||||
return DC_ERROR_UNEXPECTED;
|
||||
}
|
||||
|
||||
if (dc_is_hdmi_tmds_signal(stream->signal)) {
|
||||
stream->link->phy_state.symclk_ref_cnts.otg = 1;
|
||||
if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
|
||||
stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
|
||||
else
|
||||
stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
|
||||
}
|
||||
|
||||
pipe_ctx->stream_res.tg->funcs->program_timing(
|
||||
pipe_ctx->stream_res.tg,
|
||||
&stream->timing,
|
||||
@ -1017,6 +1025,7 @@ static void dcn10_reset_back_end_for_pipe(
|
||||
if (pipe_ctx->stream_res.tg->funcs->set_drr)
|
||||
pipe_ctx->stream_res.tg->funcs->set_drr(
|
||||
pipe_ctx->stream_res.tg, NULL);
|
||||
pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
|
||||
}
|
||||
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++)
|
||||
|
@ -82,6 +82,10 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
|
||||
.set_backlight_level = dce110_set_backlight_level,
|
||||
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
|
||||
.set_pipe = dce110_set_pipe,
|
||||
.enable_lvds_link_output = dce110_enable_lvds_link_output,
|
||||
.enable_tmds_link_output = dce110_enable_tmds_link_output,
|
||||
.enable_dp_link_output = dce110_enable_dp_link_output,
|
||||
.disable_link_output = dce110_disable_link_output,
|
||||
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
|
||||
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
|
||||
};
|
||||
|
@ -1393,6 +1393,12 @@ void optc1_read_otg_state(struct optc *optc1,
|
||||
REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
|
||||
OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
|
||||
|
||||
REG_GET(OTG_VERTICAL_INTERRUPT1_CONTROL,
|
||||
OTG_VERTICAL_INTERRUPT1_INT_ENABLE, &s->vertical_interrupt1_en);
|
||||
|
||||
REG_GET(OTG_VERTICAL_INTERRUPT1_POSITION,
|
||||
OTG_VERTICAL_INTERRUPT1_LINE_START, &s->vertical_interrupt1_line);
|
||||
|
||||
REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
|
||||
OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &s->vertical_interrupt2_en);
|
||||
|
||||
|
@ -583,6 +583,8 @@ struct dcn_otg_state {
|
||||
uint32_t underflow_occurred_status;
|
||||
uint32_t otg_enabled;
|
||||
uint32_t blank_enabled;
|
||||
uint32_t vertical_interrupt1_en;
|
||||
uint32_t vertical_interrupt1_line;
|
||||
uint32_t vertical_interrupt2_en;
|
||||
uint32_t vertical_interrupt2_line;
|
||||
};
|
||||
|
@ -274,6 +274,7 @@ struct dccg_registers {
|
||||
uint32_t DSCCLK2_DTO_PARAM;
|
||||
uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE;
|
||||
uint32_t DPSTREAMCLK_GATE_DISABLE;
|
||||
uint32_t DCCG_GATE_DISABLE_CNTL;
|
||||
uint32_t DCCG_GATE_DISABLE_CNTL2;
|
||||
uint32_t DCCG_GATE_DISABLE_CNTL3;
|
||||
uint32_t HDMISTREAMCLK0_DTO_PARAM;
|
||||
|
@ -445,226 +445,6 @@
|
||||
type DSCRM_DSC_FORWARD_EN; \
|
||||
type DSCRM_DSC_OPP_PIPE_SOURCE
|
||||
|
||||
#define DSC_REG_LIST_DCN314(id) \
|
||||
SRI(DSC_TOP_CONTROL, DSC_TOP, id),\
|
||||
SRI(DSC_DEBUG_CONTROL, DSC_TOP, id),\
|
||||
SRI(DSCC_CONFIG0, DSCC, id),\
|
||||
SRI(DSCC_CONFIG1, DSCC, id),\
|
||||
SRI(DSCC_STATUS, DSCC, id),\
|
||||
SRI(DSCC_INTERRUPT_CONTROL_STATUS, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG0, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG1, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG2, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG3, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG4, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG5, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG6, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG7, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG8, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG9, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG10, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG11, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG12, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG13, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG14, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG15, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG16, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG17, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG18, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG19, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG20, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG21, DSCC, id),\
|
||||
SRI(DSCC_PPS_CONFIG22, DSCC, id),\
|
||||
SRI(DSCC_MEM_POWER_CONTROL, DSCC, id),\
|
||||
SRI(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id),\
|
||||
SRI(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id),\
|
||||
SRI(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id),\
|
||||
SRI(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id),\
|
||||
SRI(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id),\
|
||||
SRI(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id),\
|
||||
SRI(DSCC_MAX_ABS_ERROR0, DSCC, id),\
|
||||
SRI(DSCC_MAX_ABS_ERROR1, DSCC, id),\
|
||||
SRI(DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\
|
||||
SRI(DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
|
||||
SRI(DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
|
||||
SRI(DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
|
||||
SRI(DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\
|
||||
SRI(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
|
||||
SRI(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
|
||||
SRI(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
|
||||
SRI(DSCCIF_CONFIG0, DSCCIF, id),\
|
||||
SRI(DSCCIF_CONFIG1, DSCCIF, id),\
|
||||
SRI(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id)
|
||||
|
||||
#define DSC_REG_LIST_SH_MASK_DCN314(mask_sh)\
|
||||
DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_CLOCK_EN, mask_sh), \
|
||||
DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \
|
||||
DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \
|
||||
DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \
|
||||
DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_TEST_CLOCK_MUX_SEL, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, mask_sh), \
|
||||
/*DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh),*/ \
|
||||
DSC_SF(DSCC0_DSCC_STATUS, DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MINOR, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MAJOR, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG0, PPS_IDENTIFIER, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG0, LINEBUF_DEPTH, mask_sh), \
|
||||
DSC2_SF(DSCC0, DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BITS_PER_PIXEL, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG1, VBR_ENABLE, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG1, SIMPLE_422, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CONVERT_RGB, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BLOCK_PRED_ENABLE, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_422, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_420, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CHUNK_SIZE, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_WIDTH, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_HEIGHT, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_WIDTH, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_HEIGHT, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_XMIT_DELAY, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_DEC_DELAY, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG5, INITIAL_SCALE_VALUE, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG5, SCALE_INCREMENT_INTERVAL, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SCALE_DECREMENT_INTERVAL, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG6, FIRST_LINE_BPG_OFFSET, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SECOND_LINE_BPG_OFFSET, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG7, NFL_BPG_OFFSET, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG8, NSL_BPG_OFFSET, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG8, SECOND_LINE_OFFSET_ADJ, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG9, INITIAL_OFFSET, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG9, FINAL_OFFSET, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MIN_QP, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MAX_QP, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG10, RC_MODEL_SIZE, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_EDGE_FACTOR, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT0, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT1, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_LO, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_HI, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH0, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH1, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH2, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH3, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH4, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH5, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH6, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH7, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH8, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH9, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH10, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH11, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH12, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH13, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MIN_QP0, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MAX_QP0, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_BPG_OFFSET0, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP1, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP1, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET1, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP2, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP2, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET2, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP3, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP3, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET3, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP4, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP4, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET4, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP5, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP5, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET5, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP6, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP6, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET6, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP7, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP7, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET7, mask_sh), \
|
||||
DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP8, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP8, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET8, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP9, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP9, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET9, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP10, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP10, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET10, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP11, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP11, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET11, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP12, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP12, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET12, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP13, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP13, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET13, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP14, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP14, mask_sh), \
DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET14, mask_sh), \
DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_DEFAULT_MEM_LOW_POWER_STATE, mask_sh), \
DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_FORCE, mask_sh), \
DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_DIS, mask_sh), \
DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_STATE, mask_sh), \
DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_FORCE, mask_sh), \
DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_DIS, mask_sh), \
DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_STATE, mask_sh), \
DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC_R_Y_SQUARED_ERROR_LOWER, mask_sh), \
DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC_R_Y_SQUARED_ERROR_UPPER, mask_sh), \
DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC_G_CB_SQUARED_ERROR_LOWER, mask_sh), \
DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC_G_CB_SQUARED_ERROR_UPPER, mask_sh), \
DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC_B_CR_SQUARED_ERROR_LOWER, mask_sh), \
DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC_B_CR_SQUARED_ERROR_UPPER, mask_sh), \
DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_R_Y_MAX_ABS_ERROR, mask_sh), \
DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_G_CB_MAX_ABS_ERROR, mask_sh), \
DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR1, DSCC_B_CR_MAX_ABS_ERROR, mask_sh), \
DSC_SF(DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \
DSC_SF(DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
DSC_SF(DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
DSC_SF(DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \
DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_PIXEL_FORMAT, mask_sh), \
DSC2_SF(DSCCIF0, DSCCIF_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG0, DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_WIDTH, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_HEIGHT, mask_sh), \
DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, mask_sh), \
DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_OPP_PIPE_SOURCE, mask_sh)

struct dcn20_dsc_registers {
uint32_t DSC_TOP_CONTROL;
uint32_t DSC_DEBUG_CONTROL;

@ -706,6 +706,14 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}

if (dc_is_hdmi_tmds_signal(stream->signal)) {
stream->link->phy_state.symclk_ref_cnts.otg = 1;
if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
else
stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
}

if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);

@ -1565,6 +1573,7 @@ static void dcn20_update_dchubp_dpp(
/* Any updates are handled in dc interface, just need
* to apply existing programming for plane enable / OPP change */
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
|| pipe_ctx->update_flags.bits.plane_changed
|| pipe_ctx->stream->update_flags.bits.gamut_remap
|| pipe_ctx->stream->update_flags.bits.out_csc) {
/* dpp/cm gamut remap */

@ -1898,10 +1907,13 @@ void dcn20_post_unlock_program_front_end(
* can underflow due to HUBP_VTG_SEL programming if done in the regular front end
* programming sequence).
*/
if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
if (dc->hwss.update_phantom_vp_position)
dc->hwss.update_phantom_vp_position(dc, context, pipe);
dcn20_program_pipe(dc, pipe, context);
while (pipe) {
if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
if (dc->hwss.update_phantom_vp_position)
dc->hwss.update_phantom_vp_position(dc, context, pipe);
dcn20_program_pipe(dc, pipe, context);
}
pipe = pipe->bottom_pipe;
}
}
}

@ -2349,7 +2361,9 @@ static void dcn20_reset_back_end_for_pipe(
struct dc_state *context)
{
int i;
struct dc_link *link;
struct dc_link *link = pipe_ctx->stream->link;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);

DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;

@ -2357,19 +2371,15 @@ static void dcn20_reset_back_end_for_pipe(
}

if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
link = pipe_ctx->stream->link;
/* DPMS may already disable or */
/* dpms_off status is incorrect due to fastboot
* feature. When the system resumes from S4 with second
* screen only, the dpms_off would be true but
* VBIOS lit up eDP, so check link status too.
*/
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) {
if (dc->hwss.update_phy_state)
dc->hwss.update_phy_state(dc->current_state, pipe_ctx, TX_OFF_SYMCLK_OFF);
else
core_link_disable_stream(pipe_ctx);
} else if (pipe_ctx->stream_res.audio)
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
core_link_disable_stream(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);

/* free acquired resources */

@ -2409,6 +2419,16 @@ static void dcn20_reset_back_end_for_pipe(
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
pipe_ctx->stream_res.tg, NULL);
/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
* the case where the same symclk is shared across multiple otg
* instances
*/
link->phy_state.symclk_ref_cnts.otg = 0;
if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
link_hwss->disable_link_output(link,
&pipe_ctx->link_res, pipe_ctx->stream->signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
}
}

for (i = 0; i < dc->res_pool->pipe_count; i++)

@ -96,6 +96,10 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
#ifndef TRIM_FSFT
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
#endif
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
.disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn20_update_visual_confirm_color

@ -82,7 +82,7 @@ static bool patch_address_for_sbs_tb_stereo(
return false;
}

static void gpu_addr_to_uma(struct dce_hwseq *hwseq,
static bool gpu_addr_to_uma(struct dce_hwseq *hwseq,
PHYSICAL_ADDRESS_LOC *addr)
{
bool is_in_uma;

@ -98,6 +98,7 @@ static void gpu_addr_to_uma(struct dce_hwseq *hwseq,
} else {
is_in_uma = false;
}
return is_in_uma;
}

static void plane_address_in_gpu_space_to_uma(struct dce_hwseq *hwseq,

@ -86,6 +86,10 @@ static const struct hw_sequencer_funcs dcn201_funcs = {
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
.disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
};

@ -99,6 +99,10 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
#ifndef TRIM_FSFT
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
#endif
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
.disable_link_output = dce110_disable_link_output,
.is_abm_supported = dcn21_is_abm_supported,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,

@ -939,13 +939,32 @@ bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, s

void dcn30_hardware_release(struct dc *dc)
{
dc_dmub_srv_p_state_delegate(dc, false, NULL);
bool subvp_in_use = false;
uint32_t i;

dc_dmub_srv_p_state_delegate(dc, false, NULL);
dc_dmub_setup_subvp_dmub_command(dc, dc->current_state, false);

/* SubVP is treated the same way as FPO. If the driver is disabled while
* a SubVP config is in use, disable it and force P-State on the DCN side
* to prevent a P-State hang on driver enable.
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

if (!pipe->stream)
continue;

if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
subvp_in_use = true;
break;
}
}
/* If pstate is unsupported, or still supported
* by firmware, force it supported by dcn
*/
if (dc->current_state)
if ((!dc->clk_mgr->clks.p_state_change_support ||
if ((!dc->clk_mgr->clks.p_state_change_support || subvp_in_use ||
dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) &&
dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(

@ -100,6 +100,10 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,
.set_pipe = dcn21_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
.disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,

@ -99,6 +99,10 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
.disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.optimize_pwr_state = dcn21_optimize_pwr_state,

@ -634,7 +634,7 @@ static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};

static const struct resource_caps res_cap_dcn301 = {
static struct resource_caps res_cap_dcn301 = {
.num_timing_generator = 4,
.num_opp = 4,
.num_video_plane = 4,

@ -1429,6 +1429,8 @@ static bool dcn301_resource_construct(

ctx->dc_bios->regs = &bios_regs;

if (dc->ctx->asic_id.chip_id == DEVICE_ID_VGH_1435)
res_cap_dcn301.num_pll = 2;
pool->base.res_cap = &res_cap_dcn301;

pool->base.funcs = &dcn301_res_pool_funcs;

@ -876,7 +876,7 @@ static bool hubbub31_get_dcc_compression_cap(struct hubbub *hubbub,
return true;
}

static int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

@ -122,6 +122,8 @@
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)

int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config);

void hubbub31_construct(struct dcn20_hubbub *hubbub3,
struct dc_context *ctx,

@ -535,11 +535,11 @@ static void dcn31_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg,
OPTC_DSC_DISABLED, 0, 0);
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;

if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(

@ -553,12 +553,9 @@ static void dcn31_reset_back_end_for_pipe(
* screen only, the dpms_off would be true but
* VBIOS lit up eDP, so check link status too.
*/
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) {
if (dc->hwss.update_phy_state)
dc->hwss.update_phy_state(dc->current_state, pipe_ctx, TX_OFF_SYMCLK_OFF);
else
core_link_disable_stream(pipe_ctx);
} else if (pipe_ctx->stream_res.audio)
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
core_link_disable_stream(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);

/* free acquired resources */

@ -100,6 +100,10 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
.disable_link_output = dce110_disable_link_output,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,

@ -890,7 +890,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = true,
.optimize_edp_link_rate = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface to disable Z9 */
.dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
.dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
};

static const struct dc_debug_options debug_defaults_diags = {

@ -63,34 +63,28 @@
DCCG_SRII(PHASE, DTBCLK_DTO, 3),\
SR(DCCG_AUDIO_DTBCLK_DTO_MODULO),\
SR(DCCG_AUDIO_DTBCLK_DTO_PHASE),\
SR(DCCG_AUDIO_DTO_SOURCE),\
SR(DENTIST_DISPCLK_CNTL),\
SR(DSCCLK0_DTO_PARAM),\
SR(DSCCLK1_DTO_PARAM),\
SR(DSCCLK2_DTO_PARAM),\
SR(DSCCLK_DTO_CTRL),\
SR(DCCG_GATE_DISABLE_CNTL2),\
SR(DCCG_GATE_DISABLE_CNTL3),\
SR(HDMISTREAMCLK0_DTO_PARAM),\
SR(OTG_PIXEL_RATE_DIV),\
SR(DTBCLK_P_CNTL),\
SR(DCCG_AUDIO_DTO_SOURCE)


#define DCCG_MASK_SH_LIST_DCN314(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
#define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 1, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 1, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 2, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 2, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 3, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 3, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\
DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\
DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK0_EN, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN, mask_sh),\

@ -100,7 +94,6 @@
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK2_SRC_SEL, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK3_SRC_SEL, mask_sh),\
DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_EN, mask_sh),\
DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_DTO_FORCE_DIS, mask_sh),\
DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, mask_sh),\

|
DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_SRC_SEL, mask_sh),\
DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_EN, mask_sh),\
DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh)
DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE, mask_sh),\
DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, mask_sh),\
DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_MODULO, mask_sh),\
DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, mask_sh),\
DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_MODULO, mask_sh),\
DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, mask_sh),\
DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_MODULO, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\
DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\
DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh)

#define DCCG_MASK_SH_LIST_DCN314(mask_sh) \
DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 1, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 2, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 3, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_EN, mask_sh),\
DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_DTO_FORCE_DIS, mask_sh),\
DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_ENABLE, mask_sh),\
DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_ENABLE, mask_sh),\
DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_ENABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, mask_sh)

struct dccg *dccg314_create(
struct dc_context *ctx,

@ -343,12 +343,14 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
{
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
struct dc *dc = pipe_ctx->stream->ctx->dc;
bool two_pix_per_container = false;

two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);

if (pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
return odm_combine_factor;

if (is_dp_128b_132b_signal(pipe_ctx)) {
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {

@ -364,7 +366,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy)
if (odm_combine_factor == 2)
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}

@ -384,21 +386,10 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
return;

odm_combine_factor = get_odm_config(pipe_ctx, NULL);
if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1
|| dcn314_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1)
pix_per_cycle = 2;

if (pipe_ctx->stream_res.stream_enc->funcs->set_input_mode)
pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
pix_per_cycle);
}

bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;

if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
dc->debug.enable_dp_dig_pixel_rate_div_policy)
return true;
return false;
}
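
/* A quick sketch of the divider selection the dcn314 hunks above converge on
 * for the non-128b/132b, non-TMDS branch: k1 stays at /1, k2 defaults to /4
 * and drops to /2 for ODM 2:1 combine (the DP DIG pixel-rate div policy no
 * longer feeds this branch after this change). Illustrative only, not driver
 * code; the PIXEL_RATE_DIV_BY_* names in the hunk map to these divisors.
 */
static int sketch_k2_divisor(int odm_combine_factor)
{
	return odm_combine_factor == 2 ? 2 : 4; /* PIXEL_RATE_DIV_BY_2 vs _BY_4 */
}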
@ -41,6 +41,4 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig

void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);

bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);

#endif /* __DC_HWSS_DCN314_H__ */

@ -102,6 +102,10 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
.disable_link_output = dce110_disable_link_output,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,

@ -146,7 +150,6 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values,
.set_pixels_per_cycle = dcn314_set_pixels_per_cycle,
.is_dp_dig_pixel_rate_div_policy = dcn314_is_dp_dig_pixel_rate_div_policy,
};

void dcn314_hw_sequencer_construct(struct dc *dc)

@ -87,6 +87,9 @@
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L

#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL

#include "reg_helper.h"
#include "dce/dmub_abm.h"
#include "dce/dmub_psr.h"

@ -579,7 +582,7 @@ static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {

#define dsc_regsDCN314(id)\
[id] = {\
DSC_REG_LIST_DCN314(id)\
DSC_REG_LIST_DCN20(id)\
}

static const struct dcn20_dsc_registers dsc_regs[] = {

@ -590,11 +593,11 @@ static const struct dcn20_dsc_registers dsc_regs[] = {
};

static const struct dcn20_dsc_shift dsc_shift = {
DSC_REG_LIST_SH_MASK_DCN314(__SHIFT)
DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
};

static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN314(_MASK)
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};

static const struct dcn30_mpc_registers mpc_regs = {

@ -844,7 +847,7 @@ static const struct resource_caps res_cap_dcn314 = {
.num_ddc = 5,
.num_vmid = 16,
.num_mpc_3dlut = 2,
.num_dsc = 4,
.num_dsc = 3,
};

static const struct dc_plane_cap plane_cap = {

@ -68,7 +68,7 @@ static void dcn32_init_crb(struct hubbub *hubbub)
REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F);
}

static void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

@ -140,7 +140,7 @@ static uint32_t convert_and_clamp(
return ret_val;
}

static bool hubbub32_program_urgent_watermarks(
bool hubbub32_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,

@ -330,7 +330,7 @@ static bool hubbub32_program_urgent_watermarks(
return wm_pending;
}

static bool hubbub32_program_stutter_watermarks(
bool hubbub32_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,

@ -476,7 +476,7 @@ static bool hubbub32_program_stutter_watermarks(
}

static bool hubbub32_program_pstate_watermarks(
bool hubbub32_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,

@ -629,7 +629,7 @@ static bool hubbub32_program_pstate_watermarks(
}

static bool hubbub32_program_usr_watermarks(
bool hubbub32_program_usr_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,

@ -709,7 +709,7 @@ static bool hubbub32_program_usr_watermarks(
return wm_pending;
}

static void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow)
void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

@ -909,7 +909,7 @@ static void hubbub32_wm_read_state(struct hubbub *hubbub,
DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, &s->fclk_pstate_change);
}

static void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub)
void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
uint32_t refclk_mhz = hubbub->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;

@ -161,6 +161,35 @@
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)

bool hubbub32_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);

bool hubbub32_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);

bool hubbub32_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);

bool hubbub32_program_usr_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);

void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow);

void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub);

void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte);

void hubbub32_construct(struct dcn20_hubbub *hubbub2,
struct dc_context *ctx,

@ -304,7 +304,8 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
* using the max for calculation
*/
if (hubp->curs_attr.width > 0) {
cursor_size = hubp->curs_attr.width * hubp->curs_attr.height;
// Round cursor width to next multiple of 64
cursor_size = (((hubp->curs_attr.width + 63) / 64) * 64) * hubp->curs_attr.height;
break;
}
}
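
/* Sketch of the cursor-size rounding introduced above, with example values;
 * not driver code. The width is rounded up to the next multiple of 64 before
 * multiplying by height, e.g. a 100x100 cursor now counts as 128 * 100.
 */
static unsigned int sketch_rounded_cursor_size(unsigned int width, unsigned int height)
{
	return (((width + 63) / 64) * 64) * height;
}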
@ -325,7 +326,8 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
break;
}

if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) {
if (stream->cursor_position.enable && !dc->debug.alloc_extra_way_for_cursor &&
cursor_size > 16384) {
cache_lines_used += dcn32_cache_lines_for_surface(dc, cursor_size,
plane->address.grph.cursor_cache_addr.quad_part);
}

@ -345,8 +347,8 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
plane = ctx->stream_status[i].plane_states[j];

if (stream->cursor_position.enable && plane &&
!plane->address.grph.cursor_cache_addr.quad_part &&
cursor_size > 16384) {
dc->debug.alloc_extra_way_for_cursor &&
cursor_size > 16384) {
/* Cursor caching is not supported since it won't be on the same line.
* So we need an extra line to accommodate it. With large cursors and a single 4k monitor
* this case triggers corruption. If we're at the edge, then don't trigger display refresh

@ -451,7 +453,6 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
*/
void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context)
{
/*
int i;
bool enable_subvp = false;

@ -469,7 +470,6 @@ void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context)
}
}
dc_dmub_setup_subvp_dmub_command(dc, context, enable_subvp);
*/
}

/* Sub-Viewport DMUB lock needs to be acquired by driver whenever SubVP is active and:

@ -883,6 +883,7 @@ void dcn32_init_hw(struct dc *dc)
if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
link->link_status.link_active = true;
link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
if (link->link_enc->funcs->fec_is_active &&
link->link_enc->funcs->fec_is_active(link->link_enc))
link->fec_state = dc_link_fec_enabled;

@ -1181,6 +1182,9 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);

if (pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
return odm_combine_factor;

if (is_dp_128b_132b_signal(pipe_ctx)) {
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {

@ -1275,31 +1279,69 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
return false;
}

void dcn32_update_phy_state(struct dc_state *state, struct pipe_ctx *pipe_ctx,
enum phy_state target_state)
static void apply_symclk_on_tx_off_wa(struct dc_link *link)
{
enum phy_state current_state = pipe_ctx->stream->link->phy_state;
/* There are use cases where SYMCLK is referenced by OTG. For instance
* for TMDS signal, OTG relies on SYMCLK even if TX video output is off.
* However the current link interface will power off the PHY when disabling link
* output. This will turn off SYMCLK generated by the PHY. The workaround is
* to identify such a case where SYMCLK is still in use by OTG when we
* power off the PHY. When this is detected, we will temporarily power the PHY
* back on and move the PHY's SYMCLK state to SYMCLK_ON_TX_OFF by calling the
* program_pix_clk interface. When OTG is disabled, we will then power
* off the PHY by calling disable link output again.
*
* In future dcn generations, we plan to rework the transmitter control
* interface so that we could have an option to set the SYMCLK ON TX OFF
* state in one step without this workaround
*/

if (target_state == TX_OFF_SYMCLK_OFF) {
core_link_disable_stream(pipe_ctx);
pipe_ctx->stream->link->phy_state = TX_OFF_SYMCLK_OFF;
} else if (target_state == TX_ON_SYMCLK_ON) {
core_link_enable_stream(state, pipe_ctx);
pipe_ctx->stream->link->phy_state = TX_ON_SYMCLK_ON;
} else if (target_state == TX_OFF_SYMCLK_ON) {
if (current_state == TX_ON_SYMCLK_ON) {
core_link_disable_stream(pipe_ctx);
pipe_ctx->stream->link->phy_state = TX_OFF_SYMCLK_OFF;
struct dc *dc = link->ctx->dc;
struct pipe_ctx *pipe_ctx = NULL;
uint8_t i;

if (link->phy_state.symclk_ref_cnts.otg > 0) {
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings);
link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
break;
}
}
}
}

pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings);
pipe_ctx->stream->link->phy_state = TX_OFF_SYMCLK_ON;
} else
BREAK_TO_DEBUGGER();
void dcn32_disable_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
struct dc *dc = link->ctx->dc;
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
struct dmcu *dmcu = dc->res_pool->dmcu;

if (signal == SIGNAL_TYPE_EDP &&
link->dc->hwss.edp_backlight_control)
link->dc->hwss.edp_backlight_control(link, false);
else if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);

link_hwss->disable_link_output(link, link_res, signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;

if (signal == SIGNAL_TYPE_EDP &&
link->dc->hwss.edp_backlight_control)
link->dc->hwss.edp_power_control(link, false);
else if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->unlock_phy(dmcu);

dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);

apply_symclk_on_tx_off_wa(link);
}
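
/* A minimal model (not from this patch) of the SYMCLK/TX states the
 * workaround above manipulates, assuming only the three states named in
 * this hunk. Disabling link output while an OTG still holds a SYMCLK
 * reference lands in SYMCLK_ON_TX_OFF (via program_pix_clk) instead of
 * SYMCLK_OFF_TX_OFF.
 */
enum sketch_symclk_state { SKETCH_SYMCLK_OFF_TX_OFF, SKETCH_SYMCLK_ON_TX_OFF, SKETCH_SYMCLK_ON_TX_ON };

static enum sketch_symclk_state sketch_disable_link_output(int otg_refcount)
{
	return otg_refcount > 0 ? SKETCH_SYMCLK_ON_TX_OFF : SKETCH_SYMCLK_OFF_TX_OFF;
}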

/* For SubVP the main pipe can have a viewport position change

@ -1310,7 +1352,7 @@ void dcn32_update_phantom_vp_position(struct dc *dc,
struct dc_state *context,
struct pipe_ctx *phantom_pipe)
{
uint8_t i;
uint32_t i;
struct dc_plane_state *phantom_plane = phantom_pipe->plane_state;

for (i = 0; i < dc->res_pool->pipe_count; i++) {

@ -84,8 +84,9 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,

bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);

void dcn32_update_phy_state(struct dc_state *state, struct pipe_ctx *pipe_ctx,
enum phy_state target_state);
void dcn32_disable_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal);

void dcn32_update_phantom_vp_position(struct dc *dc,
struct dc_state *context,

@ -99,12 +99,15 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,
.set_pipe = dcn21_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
.disable_link_output = dcn32_disable_link_output,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.commit_subvp_config = dcn32_commit_subvp_config,
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
.update_phy_state = dcn32_update_phy_state,
.update_phantom_vp_position = dcn32_update_phantom_vp_position,
};

@ -90,29 +90,6 @@
#include "dcn20/dcn20_vmid.h"
#include "dml/dcn32/dcn32_fpu.h"

#define DCN_BASE__INST0_SEG1 0x000000C0
#define DCN_BASE__INST0_SEG2 0x000034C0
#define DCN_BASE__INST0_SEG3 0x00009000
#define NBIO_BASE__INST0_SEG1 0x00000014

#define MAX_INSTANCE 6
#define MAX_SEGMENT 6

struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};

struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};

static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };

#define DC_LOGGER_INIT(logger)

enum dcn32_clk_src_array_id {

@ -743,6 +720,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_single_display_2to1_odm_policy = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
.allow_sw_cursor_fallback = false,
.alloc_extra_way_for_cursor = true,
};

static const struct dc_debug_options debug_defaults_diags = {

@ -919,10 +897,10 @@ static struct hubp *dcn32_hubp_create(

#undef REG_STRUCT
#define REG_STRUCT hubp_regs
hubp_regs_init(0),
hubp_regs_init(1),
hubp_regs_init(2),
hubp_regs_init(3);
hubp_regs_init(0),
hubp_regs_init(1),
hubp_regs_init(2),
hubp_regs_init(3);

if (hubp32_construct(hubp2, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))

@ -1859,12 +1837,6 @@ validate_out:
return out;
}


static bool is_dual_plane(enum surface_pixel_format format)
{
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}

int dcn32_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,

@ -1873,12 +1845,36 @@ int dcn32_populate_dml_pipes_from_context(
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
bool subvp_in_use = false, is_pipe_split_expected[MAX_PIPES];
bool subvp_in_use = false;
int plane_count = 0;
struct dc_crtc_timing *timing;

dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);

/* Determine whether we will apply ODM 2to1 policy:
* Applies to single display and where the number of planes is less than 3.
* For the 3 plane case (2 MPO planes), we will not set the policy for the MPO pipes.
*
* Apply pipe split policy first so we can predict the pipe split correctly
* (dcn32_predict_pipe_split).
*/
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
continue;
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;

pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
if (context->stream_count == 1 && !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream)) {
if (dc->debug.enable_single_display_2to1_odm_policy) {
if (!((plane_count > 2) && pipe->top_pipe))
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
}
}
pipe_cnt++;
}

for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {

if (!res_ctx->pipe_ctx[i].stream)

@ -1937,31 +1933,6 @@ int dcn32_populate_dml_pipes_from_context(
if (pipe->stream && !pipe->prev_odm_pipe &&
(!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
++plane_count;

DC_FP_START();
is_pipe_split_expected[i] = dcn32_predict_pipe_split(context, pipes[i].pipe, i);
DC_FP_END();

pipe_cnt++;
}

/* Determine whether we will apply ODM 2to1 policy
* Applies to single display and where the number of planes is less than 3
* For the 3 plane case (2 MPO planes), we will not set the policy for the MPO pipes
*/
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
continue;
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;

pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
if (context->stream_count == 1 && !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal)) {
if (dc->debug.enable_single_display_2to1_odm_policy) {
if (!((plane_count > 2) && pipe->top_pipe))
pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
}
}
pipe_cnt++;
}

@ -1969,19 +1940,7 @@ int dcn32_populate_dml_pipes_from_context(
* the DET available for each pipe). Use the DET override input to maintain our driver
* policy.
*/
if (pipe_cnt == 1 && !is_pipe_split_expected[0]) {
pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE;
if (pipe->plane_state && !dc->debug.disable_z9_mpc) {
if (!is_dual_plane(pipe->plane_state->format)) {
pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE;
pipes[0].pipe.src.unbounded_req_mode = true;
if (pipe->plane_state->src_rect.width >= 5120 &&
pipe->plane_state->src_rect.height >= 2880)
pipes[0].pipe.src.det_size_override = 320; // 5K or higher
}
}
} else
dcn32_determine_det_override(context, pipes, is_pipe_split_expected, dc->res_pool->pipe_count);
dcn32_set_det_allocations(dc, context, pipes);

// In general cases we want to keep the dram clock change requirement
// (prefer configs that support MCLK switch). Only override to false

@ -28,6 +28,10 @@

#include "core_types.h"

#define DCN3_2_DEFAULT_DET_SIZE 256
#define DCN3_2_MAX_DET_SIZE 1152
#define DCN3_2_MIN_DET_SIZE 128
#define DCN3_2_MIN_COMPBUF_SIZE_KB 128
#define DCN3_2_DET_SEG_SIZE 64
#define DCN3_2_MALL_MBLK_SIZE_BYTES 65536 // 64 * 1024
#define DCN3_2_MBLK_WIDTH 128

@ -109,9 +113,12 @@ struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
struct dc_stream_state *stream,
struct pipe_ctx *head_pipe);

void dcn32_determine_det_override(struct dc_state *context, display_e2e_pipe_params_st *pipes,
bool *is_pipe_split_expected, int pipe_cnt);
void dcn32_determine_det_override(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes);

void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes);
/* definitions for run time init of reg offsets */

/* CLK SRC */

@ -28,6 +28,11 @@
#include "dcn20/dcn20_resource.h"
#include "dml/dcn32/display_mode_vba_util_32.h"

static bool is_dual_plane(enum surface_pixel_format format)
{
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}

/**
* ********************************************************************************************
* dcn32_helper_calculate_num_ways_for_subvp: Calculate number of ways needed for SubVP

@ -66,8 +71,11 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

// Find the phantom pipes
if (pipe->stream && pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe &&
/* Find the phantom pipes.
* - For the pipe split case we need to loop through the bottom and next ODM
* pipes, or only half the viewport size is counted
*/
if (pipe->stream && pipe->plane_state &&
pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
struct pipe_ctx *main_pipe = NULL;

@ -118,9 +126,9 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
// (MALL is 64-byte aligned)
cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2;

// For DCC we must cache the meta surface, so double cache lines required
/* For DCC divide by 256 */
if (pipe->plane_state->dcc.enable)
cache_lines_per_plane *= 2;
cache_lines_per_plane = cache_lines_per_plane + (cache_lines_per_plane / 256) + 1;
cache_lines_used += cache_lines_per_plane;
}
}
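
/* Worked example (not driver code) of the MALL cache-line math above,
 * assuming a 64-byte cache line: base lines = bytes / 64 + 2 for alignment,
 * and with DCC enabled the 1/256-sized metadata surface adds lines/256 + 1
 * rather than doubling the count as the removed line did.
 */
static unsigned int sketch_cache_lines(unsigned int bytes_in_mall, int dcc_enabled)
{
	unsigned int lines = bytes_in_mall / 64 + 2;

	if (dcc_enabled)
		lines += lines / 256 + 1;
	return lines;
}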
@ -225,36 +233,133 @@ bool dcn32_mpo_in_use(struct dc_state *context)
return false;
}

void dcn32_determine_det_override(struct dc_state *context, display_e2e_pipe_params_st *pipes,
bool *is_pipe_split_expected, int pipe_cnt)
/**
* *******************************************************************************************
* dcn32_determine_det_override: Determine DET allocation for each pipe
*
* This function determines how much DET to allocate for each pipe. The total number of
* DET segments will be split equally among each of the streams, and after that the DET
* segments per stream will be split equally among the planes for the given stream.
*
* If there is a plane that's driven by more than 1 pipe (i.e. pipe split), then the
* number of DET for that given plane will be split among the pipes driving that plane.
*
*
* High level algorithm:
* 1. Split total DET among number of streams
* 2. For each stream, split DET among the planes
* 3. For each plane, check if there is a pipe split. If yes, split the DET allocation
* among those pipes.
* 4. Assign the DET override to the DML pipes.
*
* @param [in]: dc: Current DC state
* @param [in]: context: New DC state to be programmed
* @param [in]: pipes: Array of DML pipes
*
* @return: void
*
* *******************************************************************************************
*/
void dcn32_determine_det_override(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes)
{
int i, j, count, stream_segments, pipe_segments[MAX_PIPES];
uint32_t i, j, k;
uint8_t pipe_plane_count, stream_segments, plane_segments, pipe_segments[MAX_PIPES] = {0};
uint8_t pipe_counted[MAX_PIPES] = {0};
uint8_t pipe_cnt = 0;
struct dc_plane_state *current_plane = NULL;
uint8_t stream_count = 0;

for (i = 0; i < context->stream_count; i++) {
/* Don't count SubVP streams for DET allocation */
if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) {
stream_count++;
}
}

if (context->stream_count > 0) {
stream_segments = 18 / context->stream_count;
stream_segments = 18 / stream_count;
for (i = 0; i < context->stream_count; i++) {
count = 0;
for (j = 0; j < pipe_cnt; j++) {
if (context->res_ctx.pipe_ctx[j].stream == context->streams[i]) {
count++;
if (is_pipe_split_expected[j])
count++;
if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
continue;
if (context->stream_status[i].plane_count > 0)
plane_segments = stream_segments / context->stream_status[i].plane_count;
else
plane_segments = stream_segments;
for (j = 0; j < dc->res_pool->pipe_count; j++) {
pipe_plane_count = 0;
if (context->res_ctx.pipe_ctx[j].stream == context->streams[i] &&
pipe_counted[j] != 1) {
/* Note: pipe_plane_count indicates the number of pipes to be used for a
* given plane. e.g. pipe_plane_count = 1 means single pipe (i.e. not split),
* pipe_plane_count = 2 means 2:1 split, etc.
*/
pipe_plane_count++;
pipe_counted[j] = 1;
current_plane = context->res_ctx.pipe_ctx[j].plane_state;
for (k = 0; k < dc->res_pool->pipe_count; k++) {
if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
pipe_plane_count++;
pipe_counted[k] = 1;
}
}

pipe_segments[j] = plane_segments / pipe_plane_count;
for (k = 0; k < dc->res_pool->pipe_count; k++) {
if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
pipe_segments[k] = plane_segments / pipe_plane_count;
}
}
}
}
pipe_segments[i] = stream_segments / count;
}

for (i = 0; i < pipe_cnt; i++) {
pipes[i].pipe.src.det_size_override = 0;
for (j = 0; j < context->stream_count; j++) {
if (context->res_ctx.pipe_ctx[i].stream == context->streams[j]) {
pipes[i].pipe.src.det_size_override = pipe_segments[j] * DCN3_2_DET_SEG_SIZE;
break;
}
}
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
pipes[pipe_cnt].pipe.src.det_size_override = pipe_segments[i] * DCN3_2_DET_SEG_SIZE;
pipe_cnt++;
}
} else {
for (i = 0; i < pipe_cnt; i++)
for (i = 0; i < dc->res_pool->pipe_count; i++)
pipes[i].pipe.src.det_size_override = 4 * DCN3_2_DET_SEG_SIZE; //DCN3_2_DEFAULT_DET_SIZE
}
}

void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;

for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {

if (!res_ctx->pipe_ctx[i].stream)
continue;

pipe = &res_ctx->pipe_ctx[i];
pipe_cnt++;
}

/* For DET allocation, we don't want to use DML policy (not optimal for utilizing all
* the DET available for each pipe). Use the DET override input to maintain our driver
* policy.
*/
if (pipe_cnt == 1) {
pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE;
if (pipe->plane_state && !dc->debug.disable_z9_mpc && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
if (!is_dual_plane(pipe->plane_state->format)) {
pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE;
pipes[0].pipe.src.unbounded_req_mode = true;
if (pipe->plane_state->src_rect.width >= 5120 &&
pipe->plane_state->src_rect.height >= 2880)
pipes[0].pipe.src.det_size_override = 320; // 5K or higher
}
}
} else
dcn32_determine_det_override(dc, context, pipes);
}
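
/* Worked example (not driver code) of the DET split implemented above,
 * assuming the 18 segments of DCN3_2_DET_SEG_SIZE (64KB) that the function
 * divides stream -> plane -> pipe; 18 * 64KB matches the 1152KB
 * DCN3_2_MAX_DET_SIZE. Two streams, two planes on one of them, no split:
 */
static unsigned int sketch_det_override_kb(void)
{
	unsigned int stream_segments = 18 / 2;			/* 9 per stream */
	unsigned int plane_segments = stream_segments / 2;	/* 4 per plane */
	unsigned int pipe_segments = plane_segments / 1;	/* no pipe split */

	return pipe_segments * 64;				/* 256KB override */
}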
@ -93,31 +93,6 @@
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"

#define DCN_BASE__INST0_SEG1 0x000000C0
#define DCN_BASE__INST0_SEG2 0x000034C0
#define DCN_BASE__INST0_SEG3 0x00009000
#define NBIO_BASE__INST0_SEG1 0x00000014

#define MAX_INSTANCE 8
#define MAX_SEGMENT 6

struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
};

struct IP_BASE {
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
};

static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0 } } } };

#define DC_LOGGER_INIT(logger)
#define fixed16_to_double(x) (((double)x) / ((double) (1 << 16)))
#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))

@ -745,6 +720,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_single_display_2to1_odm_policy = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
.allow_sw_cursor_fallback = false,
.alloc_extra_way_for_cursor = true,
};

static const struct dc_debug_options debug_defaults_diags = {

@ -820,11 +796,11 @@ static struct dce_i2c_hw *dcn321_i2c_hw_create(

#undef REG_STRUCT
#define REG_STRUCT i2c_hw_regs
i2c_inst_regs_init(1),
i2c_inst_regs_init(2),
i2c_inst_regs_init(3),
i2c_inst_regs_init(4),
i2c_inst_regs_init(5);
i2c_inst_regs_init(1),
i2c_inst_regs_init(2),
i2c_inst_regs_init(3),
i2c_inst_regs_init(4),
i2c_inst_regs_init(5);

dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);

@ -922,10 +898,10 @@ static struct hubp *dcn321_hubp_create(

#undef REG_STRUCT
#define REG_STRUCT hubp_regs
hubp_regs_init(0),
hubp_regs_init(1),
hubp_regs_init(2),
hubp_regs_init(3);
hubp_regs_init(0),
hubp_regs_init(1),
hubp_regs_init(2),
hubp_regs_init(3);

if (hubp32_construct(hubp2, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))

@ -1670,10 +1646,10 @@ static bool dcn321_resource_construct(

#undef REG_STRUCT
#define REG_STRUCT abm_regs
abm_regs_init(0),
abm_regs_init(1),
abm_regs_init(2),
abm_regs_init(3);
abm_regs_init(0),
abm_regs_init(1),
abm_regs_init(2),
abm_regs_init(3);

#undef REG_STRUCT
#define REG_STRUCT dccg_regs

@ -173,7 +173,8 @@ void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigne
(result == 0x0)
void dm_helpers_init_panel_settings(
struct dc_context *ctx,
struct dc_panel_config *config);
struct dc_panel_config *config,
struct dc_sink *sink);
void dm_helpers_override_panel_settings(
struct dc_context *ctx,
struct dc_panel_config *config);

@ -2636,7 +2636,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
|
||||
&mode_lib->vba.SrcActiveDrainRate,
|
||||
&mode_lib->vba.TInitXFill,
|
||||
&mode_lib->vba.TslvChk);
|
||||
locals->XFCRemoteSurfaceFlipLatency[k] =
|
||||
locals->XFCRemoteSurfaceFlipLatency[k] =
|
||||
dml_floor(
|
||||
mode_lib->vba.XFCRemoteSurfaceFlipDelay
|
||||
/ (mode_lib->vba.HTotal[k]
|
||||
|
@ -379,6 +379,11 @@ void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
|
||||
dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
|
||||
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
|
||||
|
||||
if ((int)(dcn3_01_soc.dram_clock_change_latency_us * 1000)
|
||||
!= dc->debug.dram_clock_change_latency_ns
|
||||
&& dc->debug.dram_clock_change_latency_ns) {
|
||||
dcn3_01_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000.0;
|
||||
}
|
||||
dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
|
||||
}
|
||||
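/* Editor's note: the override pattern in the hunk above repeats for each of
 * the DCN 3.1.x variants below. Whenever the debug-provided DRAM clock-change
 * latency (in ns) is non-zero and differs from the SoC default (in us), the
 * SoC bounding-box table is patched before dml_init_instance() re-seeds DML. */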
|
|
@ -291,6 +291,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
.num_chans = 4,
};
|
struct _vcs_dpi_ip_params_st dcn3_16_ip = {
@ -667,6 +668,12 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
dcn3_1_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
|
if ((int)(dcn3_1_soc.dram_clock_change_latency_us * 1000)
!= dc->debug.dram_clock_change_latency_ns
&& dc->debug.dram_clock_change_latency_ns) {
dcn3_1_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
}
|
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31);
else
@ -682,7 +689,11 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
|
dcn3_15_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
dcn3_15_ip.max_num_dpp = dc->res_pool->pipe_count;
dcn3_15_soc.num_chans = bw_params->num_channels;
|
if (bw_params->num_channels > 0)
dcn3_15_soc.num_chans = bw_params->num_channels;
if (bw_params->dram_channel_width_bytes > 0)
dcn3_15_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes;
|
ASSERT(clk_table->num_entries);
|
@ -721,6 +732,12 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
*/
dcn3_15_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
|
if ((int)(dcn3_15_soc.dram_clock_change_latency_us * 1000)
!= dc->debug.dram_clock_change_latency_ns
&& dc->debug.dram_clock_change_latency_ns) {
dcn3_15_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
}
|
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31);
else
@ -813,6 +830,11 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_16_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
}
if ((int)(dcn3_16_soc.dram_clock_change_latency_us * 1000)
!= dc->debug.dram_clock_change_latency_ns
&& dc->debug.dram_clock_change_latency_ns) {
dcn3_16_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
}
|
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31);
|
@ -1051,10 +1051,10 @@ static bool CalculatePrefetchSchedule(
bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
/*rev 99*/
prefetch_bw_pr = dml_min(1, bytes_pp * myPipe->PixelClock / (double) myPipe->DPPPerPlane);
max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerPlane, prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));
prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw);
prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw);
|
min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre);
Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4;
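/* Editor's note on the CalculatePrefetchSchedule hunk above: prefetch_bw_oto
 * is now built from the two new intermediates, prefetch_bw_pr and max_Tsw,
 * instead of recomputing both terms inline in a single dml_max() expression. */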
|
@ -264,6 +264,11 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
}
|
if ((int)(dcn3_14_soc.dram_clock_change_latency_us * 1000)
!= dc->debug.dram_clock_change_latency_ns
&& dc->debug.dram_clock_change_latency_ns) {
dcn3_14_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314);
else
@ -318,6 +323,8 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.dest.vblank_nom =
dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0));
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
|
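/* Editor's cross-reference: the vblank_nom value populated above travels
 * through the new _vcs_dpi_display_pipe_dest_params_st.vblank_nom field and is
 * consumed by fetch_pipe_params() and CalculateMaxVStartup() in later hunks of
 * this same diff. */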
|
@ -265,33 +265,13 @@ static void CalculateRowBandwidth(
|
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
unsigned int GPUVMMaxPageTableLevels,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
bool GPUVMEnable,
double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
double DPTEBytesPerRow,
double BandwidthAvailableForImmediateFlip,
unsigned int TotImmediateFlipBytes,
enum source_format_class SourcePixelFormat,
double LineTime,
double VRatio,
double VRatioChroma,
double Tno_bw,
bool DCCEnable,
unsigned int dpte_row_height,
unsigned int meta_row_height,
unsigned int dpte_row_height_chroma,
unsigned int meta_row_height_chroma,
double *DestinationLinesToRequestVMInImmediateFlip,
double *DestinationLinesToRequestRowInImmediateFlip,
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe);
double DPTEBytesPerRow);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
@ -325,64 +305,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
unsigned int NumberOfActivePlanes,
unsigned int MaxLineBufferLines,
unsigned int LineBufferSize,
unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
bool SynchronizedVBlank,
unsigned int dpte_group_bytes[],
unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
double WritebackLatency,
double WritebackChunkSize,
double SOCCLK,
double DRAMClockChangeLatency,
double SRExitTime,
double SREnterPlusExitTime,
double SRExitZ8Time,
double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
double HRatio[],
double HRatioChroma[],
unsigned int vtaps[],
unsigned int VTAPsChroma[],
double VRatio[],
double VRatioChroma[],
unsigned int HTotal[],
double PixelClock[],
unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
bool WritebackEnable[],
enum source_format_class WritebackPixelFormat[],
double WritebackDestinationWidth[],
double WritebackDestinationHeight[],
double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
double *UrgentWatermark,
double *WritebackUrgentWatermark,
double *DRAMClockChangeWatermark,
double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
double *Z8StutterEnterPlusExitWatermark,
double *MinActiveDRAMClockChangeLatencySupported);
double *Z8StutterEnterPlusExitWatermark);
|
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
@ -1362,7 +1306,7 @@ static bool CalculatePrefetchSchedule(
// - ((NumberOfCursors > 0 || GPUVMEnable || DCCEnable) ?
- ((GPUVMEnable || myPipe->DCCEnable) ? (*DestinationLinesToRequestVMInVBlank + 2 * *DestinationLinesToRequestRowInVBlank) : 0.0); // TODO: Did someone else add this??
#else
LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - *DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - *DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
#endif
|
#ifdef __DML_VBA_DEBUG__
@ -2928,33 +2872,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
k,
HostVMInefficiencyFactor,
v->UrgentExtraLatency,
v->UrgentLatency,
v->GPUVMMaxPageTableLevels,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMEnable,
v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
v->PixelPTEBytesPerRow[k],
v->BandwidthAvailableForImmediateFlip,
v->TotImmediateFlipBytes,
v->SourcePixelFormat[k],
v->HTotal[k] / v->PixelClock[k],
v->VRatio[k],
v->VRatioChroma[k],
v->Tno_bw[k],
v->DCCEnable[k],
v->dpte_row_height[k],
v->meta_row_height[k],
v->dpte_row_height_chroma[k],
v->meta_row_height_chroma[k],
&v->DestinationLinesToRequestVMInImmediateFlip[k],
&v->DestinationLinesToRequestRowInImmediateFlip[k],
&v->final_flip_bw[k],
&v->ImmediateFlipSupportedForPipe[k]);
v->PixelPTEBytesPerRow[k]);
}
|
v->total_dcn_read_bw_with_flip = 0.0;
@ -3041,64 +2965,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
v->NumberOfActivePlanes,
v->MaxLineBufferLines,
v->LineBufferSize,
v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
v->SynchronizedVBlank,
v->dpte_group_bytes,
v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
v->WritebackLatency,
v->WritebackChunkSize,
v->SOCCLK,
v->DRAMClockChangeLatency,
v->SRExitTime,
v->SREnterPlusExitTime,
v->SRExitZ8Time,
v->SREnterPlusExitZ8Time,
v->DCFCLKDeepSleep,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
v->HRatio,
v->HRatioChroma,
v->vtaps,
v->VTAPsChroma,
v->VRatio,
v->VRatioChroma,
v->HTotal,
v->PixelClock,
v->BlendingAndTiming,
v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
v->DSTXAfterScaler,
v->DSTYAfterScaler,
v->WritebackEnable,
v->WritebackPixelFormat,
v->WritebackDestinationWidth,
v->WritebackDestinationHeight,
v->WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
&DRAMClockChangeSupport,
&v->UrgentWatermark,
&v->WritebackUrgentWatermark,
&v->DRAMClockChangeWatermark,
&v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->Z8StutterExitWatermark,
&v->Z8StutterEnterPlusExitWatermark,
&v->MinActiveDRAMClockChangeLatencySupported);
&v->Z8StutterEnterPlusExitWatermark);
|
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@ -3710,61 +3598,43 @@ static void CalculateRowBandwidth(
|
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
unsigned int GPUVMMaxPageTableLevels,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
bool GPUVMEnable,
double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
double DPTEBytesPerRow,
double BandwidthAvailableForImmediateFlip,
unsigned int TotImmediateFlipBytes,
enum source_format_class SourcePixelFormat,
double LineTime,
double VRatio,
double VRatioChroma,
double Tno_bw,
bool DCCEnable,
unsigned int dpte_row_height,
unsigned int meta_row_height,
unsigned int dpte_row_height_chroma,
unsigned int meta_row_height_chroma,
double *DestinationLinesToRequestVMInImmediateFlip,
double *DestinationLinesToRequestRowInImmediateFlip,
double *final_flip_bw,
bool *ImmediateFlipSupportedForPipe)
double DPTEBytesPerRow)
{
struct vba_vars_st *v = &mode_lib->vba;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW;
double LineTime = v->HTotal[k] / v->PixelClock[k];
|
if (GPUVMEnable == true && HostVMEnable == true) {
HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
if (v->GPUVMEnable == true && v->HostVMEnable == true) {
HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMDynamicLevelsTrips = 0;
}
|
if (GPUVMEnable == true || DCCEnable == true) {
ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
}
|
if (GPUVMEnable == true) {
if (v->GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(
Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
|
*DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
if ((GPUVMEnable == true || DCCEnable == true)) {
v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
UrgentLatency * (HostVMDynamicLevelsTrips + 1),
@ -3773,54 +3643,54 @@ static void CalculateFlipSchedule(
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
|
*DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
|
if (GPUVMEnable == true) {
*final_flip_bw = dml_max(
PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
} else if ((GPUVMEnable == true || DCCEnable == true)) {
*final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
if (v->GPUVMEnable == true) {
v->final_flip_bw[k] = dml_max(
PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
} else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
} else {
*final_flip_bw = 0;
v->final_flip_bw[k] = 0;
}
|
if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
if (GPUVMEnable == true && DCCEnable != true) {
min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
} else if (GPUVMEnable != true && DCCEnable == true) {
min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else {
min_row_time = dml_min4(
dpte_row_height * LineTime / VRatio,
meta_row_height * LineTime / VRatio,
dpte_row_height_chroma * LineTime / VRatioChroma,
meta_row_height_chroma * LineTime / VRatioChroma);
v->dpte_row_height[k] * LineTime / v->VRatio[k],
v->meta_row_height[k] * LineTime / v->VRatio[k],
v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
}
} else {
if (GPUVMEnable == true && DCCEnable != true) {
min_row_time = dpte_row_height * LineTime / VRatio;
} else if (GPUVMEnable != true && DCCEnable == true) {
min_row_time = meta_row_height * LineTime / VRatio;
if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
} else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
} else {
min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
}
}
|
if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
*ImmediateFlipSupportedForPipe = false;
v->ImmediateFlipSupportedForPipe[k] = false;
} else {
*ImmediateFlipSupportedForPipe = true;
v->ImmediateFlipSupportedForPipe[k] = true;
}
|
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
#endif
|
}
@ -5412,33 +5282,13 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateFlipSchedule(
mode_lib,
k,
HostVMInefficiencyFactor,
v->ExtraLatency,
v->UrgLatency[i],
v->GPUVMMaxPageTableLevels,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->GPUVMEnable,
v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
v->DPTEBytesPerRow[i][j][k],
v->BandwidthAvailableForImmediateFlip,
v->TotImmediateFlipBytes,
v->SourcePixelFormat[k],
v->HTotal[k] / v->PixelClock[k],
v->VRatio[k],
v->VRatioChroma[k],
v->Tno_bw[k],
v->DCCEnable[k],
v->dpte_row_height[k],
v->meta_row_height[k],
v->dpte_row_height_chroma[k],
v->meta_row_height_chroma[k],
&v->DestinationLinesToRequestVMInImmediateFlip[k],
&v->DestinationLinesToRequestRowInImmediateFlip[k],
&v->final_flip_bw[k],
&v->ImmediateFlipSupportedForPipe[k]);
v->DPTEBytesPerRow[i][j][k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@ -5496,64 +5346,28 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
v->NumberOfActivePlanes,
v->MaxLineBufferLines,
v->LineBufferSize,
v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
v->SynchronizedVBlank,
v->dpte_group_bytes,
v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
v->WritebackLatency,
v->WritebackChunkSize,
v->SOCCLKPerState[i],
v->DRAMClockChangeLatency,
v->SRExitTime,
v->SREnterPlusExitTime,
v->SRExitZ8Time,
v->SREnterPlusExitZ8Time,
v->ProjectedDCFCLKDeepSleep[i][j],
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
v->HRatio,
v->HRatioChroma,
v->vtaps,
v->VTAPsChroma,
v->VRatio,
v->VRatioChroma,
v->HTotal,
v->PixelClock,
v->BlendingAndTiming,
v->NoOfDPPThisState,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
v->DSTXAfterScaler,
v->DSTYAfterScaler,
v->WritebackEnable,
v->WritebackPixelFormat,
v->WritebackDestinationWidth,
v->WritebackDestinationHeight,
v->WritebackSourceHeight,
UnboundedRequestEnabledThisState,
CompressedBufferSizeInkByteThisState,
&v->DRAMClockChangeSupport[i][j],
&v->UrgentWatermark,
&v->WritebackUrgentWatermark,
&v->DRAMClockChangeWatermark,
&v->WritebackDRAMClockChangeWatermark,
&dummy,
&dummy,
&dummy,
&dummy,
&v->MinActiveDRAMClockChangeLatencySupported);
&dummy);
}
}
|
@ -5679,64 +5493,28 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
unsigned int NumberOfActivePlanes,
unsigned int MaxLineBufferLines,
unsigned int LineBufferSize,
unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
bool SynchronizedVBlank,
unsigned int dpte_group_bytes[],
unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
double WritebackLatency,
double WritebackChunkSize,
double SOCCLK,
double DRAMClockChangeLatency,
double SRExitTime,
double SREnterPlusExitTime,
double SRExitZ8Time,
double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
double HRatio[],
double HRatioChroma[],
unsigned int vtaps[],
unsigned int VTAPsChroma[],
double VRatio[],
double VRatioChroma[],
unsigned int HTotal[],
double PixelClock[],
unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
bool WritebackEnable[],
enum source_format_class WritebackPixelFormat[],
double WritebackDestinationWidth[],
double WritebackDestinationHeight[],
double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
double *UrgentWatermark,
double *WritebackUrgentWatermark,
double *DRAMClockChangeWatermark,
double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
double *Z8StutterEnterPlusExitWatermark,
double *MinActiveDRAMClockChangeLatencySupported)
double *Z8StutterEnterPlusExitWatermark)
{
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY;
@ -5756,103 +5534,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double TotalPixelBW = 0.0;
int k, j;
|
*UrgentWatermark = UrgentLatency + ExtraLatency;
v->UrgentWatermark = UrgentLatency + ExtraLatency;
|
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
#endif
|
*DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
|
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
#endif
|
v->TotalActiveWriteback = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (WritebackEnable[k] == true) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
|
if (v->TotalActiveWriteback <= 1) {
*WritebackUrgentWatermark = WritebackLatency;
v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
*WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
|
if (v->TotalActiveWriteback <= 1) {
*WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
} else {
*WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
|
for (k = 0; k < NumberOfActivePlanes; ++k) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalPixelBW = TotalPixelBW
+ DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
/ (HTotal[k] / PixelClock[k]);
+ DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
/ (v->HTotal[k] / v->PixelClock[k]);
}
|
for (k = 0; k < NumberOfActivePlanes; ++k) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double EffectiveDETBufferSizeY = DETBufferSizeY[k];
|
v->LBLatencyHidingSourceLinesY = dml_min(
(double) MaxLineBufferLines,
dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
(double) v->MaxLineBufferLines,
dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
|
v->LBLatencyHidingSourceLinesC = dml_min(
(double) MaxLineBufferLines,
dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
(double) v->MaxLineBufferLines,
dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
|
EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
|
EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
|
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
+ CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+ CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
|
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
|
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
- ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
|
if (NumberOfActivePlanes > 1) {
if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
- (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
- (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
|
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
- ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
|
if (NumberOfActivePlanes > 1) {
if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
- (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
- (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
|
if (WritebackEnable[k] == true) {
WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
/ (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
if (WritebackPixelFormat[k] == dm_444_64) {
if (v->WritebackEnable[k] == true) {
WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
/ (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
@ -5862,14 +5640,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
|
v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
if (BlendingAndTiming[k] == k) {
if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
for (j = 0; j < NumberOfActivePlanes; ++j) {
if (BlendingAndTiming[k] == j) {
for (j = 0; j < v->NumberOfActivePlanes; ++j) {
if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@ -5877,11 +5655,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
|
*MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency ;
|
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
@ -5889,25 +5667,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
|
v->TotalNumberOfActiveOTG = 0;
|
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (BlendingAndTiming[k] == k) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k) {
v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
|
if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
} else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
} else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
|| SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
|
*StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
*StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
*Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
*Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
*StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
*StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
*Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
*Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
|
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
@ -7407,7 +7185,7 @@ static unsigned int CalculateMaxVStartup(
double line_time_us = HTotal / PixelClock;
unsigned int vblank_actual = VTotal - VActive;
unsigned int vblank_nom_default_in_line = dml_floor(VBlankNomDefaultUS / line_time_us, 1.0);
unsigned int vblank_nom_input = dml_min(VBlankNom, vblank_nom_default_in_line);
unsigned int vblank_nom_input = VBlankNom; //dml_min(VBlankNom, vblank_nom_default_in_line);
unsigned int vblank_avail = vblank_nom_input == 0 ? vblank_nom_default_in_line : vblank_nom_input;
|
vblank_size = (unsigned int) dml_min(vblank_actual, vblank_avail);
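/* Editor's note on the CalculateMaxVStartup hunk above: vblank_nom_input now
 * takes VBlankNom as-is (the old dml_min() clamp survives only as a comment),
 * and vblank_avail falls back to the VBlankNomDefaultUS-derived line count
 * only when VBlankNom is 0. */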
|
@ -243,6 +243,50 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
}
|
/**
* Finds dummy_latency_index when MCLK switching using firmware based
* vblank stretch is enabled. This function will iterate through the
* table of dummy pstate latencies until the lowest value that allows
* dm_allow_self_refresh_and_mclk_switch to happen is found
*/
int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel)
{
const int max_latency_table_entries = 4;
const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
int dummy_latency_index = 0;
|
dc_assert_fp_enabled();
|
while (dummy_latency_index < max_latency_table_entries) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
|
if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&
vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported)
break;
|
dummy_latency_index++;
}
|
if (dummy_latency_index == max_latency_table_entries) {
ASSERT(dummy_latency_index != max_latency_table_entries);
/* If the execution gets here, it means dummy p_states are
* not possible. This should never happen and would mean
* something is severely wrong.
* Here we reset dummy_latency_index to 3, because it is
* better to have underflows than system crashes.
*/
dummy_latency_index = max_latency_table_entries - 1;
}
|
return dummy_latency_index;
}
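/* Editor's sketch of the intended call pattern, taken from the
 * dcn32_calculate_wm_and_dlg_fpu hunk later in this diff (not new API):
 *
 *   if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
 *           dummy_latency_index =
 *                   dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(
 *                           dc, context, pipes, pipe_cnt, vlevel);
 */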
|
/**
* dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes
* and populate pipe_ctx with those params.
@ -286,41 +330,92 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
}
}
|
bool dcn32_predict_pipe_split(struct dc_state *context, display_pipe_params_st pipe, int index)
/**
* *******************************************************************************************
* dcn32_predict_pipe_split: Predict if pipe split will occur for a given DML pipe
*
* This function takes in a DML pipe (pipe_e2e) and predicts if pipe split is required (both
* ODM and MPC). For pipe split, ODM combine is determined by the ODM mode, and MPC combine is
* determined by DPPClk requirements
*
* This function follows the same policy as DML:
* - Check for ODM combine requirements / policy first
* - MPC combine is only chosen if there is no ODM combine requirements / policy in place, and
* MPC is required
*
* @param [in]: context: New DC state to be programmed
* @param [in]: pipe_e2e: DML pipe end to end context
*
* @return: Number of splits expected (1 for 2:1 split, 3 for 4:1 split, 0 for no splits).
*
* *******************************************************************************************
*/
uint8_t dcn32_predict_pipe_split(struct dc_state *context,
display_e2e_pipe_params_st *pipe_e2e)
{
double pscl_throughput;
double pscl_throughput_chroma;
double dpp_clk_single_dpp, clock;
double clk_frequency = 0.0;
double vco_speed = context->bw_ctx.dml.soc.dispclk_dppclk_vco_speed_mhz;
bool total_available_pipes_support = false;
uint32_t number_of_dpp = 0;
enum odm_combine_mode odm_mode = dm_odm_combine_mode_disabled;
double req_dispclk_per_surface = 0;
uint8_t num_splits = 0;
|
dc_assert_fp_enabled();
|
dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(pipe.scale_ratio_depth.hscl_ratio,
pipe.scale_ratio_depth.hscl_ratio_c,
pipe.scale_ratio_depth.vscl_ratio,
pipe.scale_ratio_depth.vscl_ratio_c,
context->bw_ctx.dml.ip.max_dchub_pscl_bw_pix_per_clk,
context->bw_ctx.dml.ip.max_pscl_lb_bw_pix_per_clk,
pipe.dest.pixel_rate_mhz,
pipe.src.source_format,
pipe.scale_taps.htaps,
pipe.scale_taps.htaps_c,
pipe.scale_taps.vtaps,
pipe.scale_taps.vtaps_c,
/* Output */
&pscl_throughput, &pscl_throughput_chroma,
&dpp_clk_single_dpp);
dml32_CalculateODMMode(context->bw_ctx.dml.ip.maximum_pixels_per_line_per_dsc_unit,
pipe_e2e->pipe.dest.hactive,
pipe_e2e->dout.output_format,
pipe_e2e->dout.output_type,
pipe_e2e->pipe.dest.odm_combine_policy,
context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dispclk_mhz,
context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dispclk_mhz,
pipe_e2e->dout.dsc_enable != 0,
0, /* TotalNumberOfActiveDPP can be 0 since we're predicting pipe split requirement */
context->bw_ctx.dml.ip.max_num_dpp,
pipe_e2e->pipe.dest.pixel_rate_mhz,
context->bw_ctx.dml.soc.dcn_downspread_percent,
context->bw_ctx.dml.ip.dispclk_ramp_margin_percent,
context->bw_ctx.dml.soc.dispclk_dppclk_vco_speed_mhz,
pipe_e2e->dout.dsc_slices,
/* Output */
&total_available_pipes_support,
&number_of_dpp,
&odm_mode,
&req_dispclk_per_surface);
|
dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(pipe_e2e->pipe.scale_ratio_depth.hscl_ratio,
pipe_e2e->pipe.scale_ratio_depth.hscl_ratio_c,
pipe_e2e->pipe.scale_ratio_depth.vscl_ratio,
pipe_e2e->pipe.scale_ratio_depth.vscl_ratio_c,
context->bw_ctx.dml.ip.max_dchub_pscl_bw_pix_per_clk,
context->bw_ctx.dml.ip.max_pscl_lb_bw_pix_per_clk,
pipe_e2e->pipe.dest.pixel_rate_mhz,
pipe_e2e->pipe.src.source_format,
pipe_e2e->pipe.scale_taps.htaps,
pipe_e2e->pipe.scale_taps.htaps_c,
pipe_e2e->pipe.scale_taps.vtaps,
pipe_e2e->pipe.scale_taps.vtaps_c,
/* Output */
&pscl_throughput, &pscl_throughput_chroma,
&dpp_clk_single_dpp);
|
clock = dpp_clk_single_dpp * (1 + context->bw_ctx.dml.soc.dcn_downspread_percent / 100);
|
if (clock > 0)
clk_frequency = vco_speed * 4.0 / ((int)(vco_speed * 4.0));
clk_frequency = vco_speed * 4.0 / ((int)(vco_speed * 4.0) / clock);
|
if (clk_frequency > context->bw_ctx.dml.soc.clock_limits[index].dppclk_mhz)
return true;
else
return false;
if (odm_mode == dm_odm_combine_mode_2to1)
num_splits = 1;
else if (odm_mode == dm_odm_combine_mode_4to1)
num_splits = 3;
else if (clk_frequency > context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dppclk_mhz)
num_splits = 1;
|
return num_splits;
}
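/* Editor's sketch (hypothetical caller, not part of this commit): per the
 * kernel-doc above, the return value maps directly onto extra pipes consumed,
 * e.g.
 *
 *   uint8_t splits = dcn32_predict_pipe_split(context, &pipes[i]);
 *   int pipes_for_surface = splits + 1; // 0 -> 1 pipe, 1 -> 2 pipes, 3 -> 4 pipes
 */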
|
static float calculate_net_bw_in_kbytes_sec(struct _vcs_dpi_voltage_scaling_st *entry)
@ -1109,17 +1204,23 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
vba->VoltageLevel = *vlevel;
}
} else {
// only call dcn20_validate_apply_pipe_split_flags if we found a supported config
memset(split, 0, MAX_PIPES * sizeof(int));
memset(merge, 0, MAX_PIPES * sizeof(bool));
*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
vba->VoltageLevel = *vlevel;
|
// Must populate phantom DLG params before programming hardware / timing for phantom pipe
DC_FP_START();
dcn32_helper_populate_phantom_dlg_params(dc, context, pipes, *pipe_cnt);
DC_FP_END();
|
/* Call validate_apply_pipe_split flags after calling DML getters for
* phantom dlg params, or some of the VBA params indicating pipe split
* can be overwritten by the getters.
*
* When setting up SubVP config, all pipes are merged before attempting to
* add phantom pipes. If pipe split (ODM / MPC) is required, both the main
* and phantom pipes will be split in the regular pipe splitting sequence.
*/
memset(split, 0, MAX_PIPES * sizeof(int));
memset(merge, 0, MAX_PIPES * sizeof(bool));
*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
vba->VoltageLevel = *vlevel;
// Note: We can't apply the phantom pipes to hardware at this time. We have to wait
// until driver has acquired the DMCUB lock to do it safely.
}
@ -1619,8 +1720,20 @@ bool dcn32_internal_validate_bw(struct dc *dc,
goto validate_fail;
}
|
if (repopulate_pipes)
if (repopulate_pipes) {
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
|
/* repopulate_pipes = 1 means the pipes were either split or merged. In this case
* we have to re-calculate the DET allocation and run through DML once more to
* ensure all the params are calculated correctly. We do not need to run the
* pipe split check again after this call (pipes are already split / merged).
* */
if (!fast_validate) {
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
}
}
*vlevel_out = vlevel;
*pipe_cnt_out = pipe_cnt;
|
@ -1666,7 +1779,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
|
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
dummy_latency_index = dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
context, pipes, pipe_cnt, vlevel);
|
/* After calling dcn30_find_dummy_latency_index_for_fw_based_mclk_switch
@ -2140,6 +2253,7 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
&& dc->bb_overrides.urgent_latency_ns) {
dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
}
|
if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000)
|
@ -29,11 +29,6 @@
|
#include "clk_mgr_internal.h"
|
#define DCN3_2_DEFAULT_DET_SIZE 256
#define DCN3_2_MAX_DET_SIZE 1152
#define DCN3_2_MIN_DET_SIZE 128
#define DCN3_2_MIN_COMPBUF_SIZE_KB 128
|
void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr);
|
void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
@ -41,9 +36,8 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
|
bool dcn32_predict_pipe_split(struct dc_state *context,
display_pipe_params_st pipe,
int index);
uint8_t dcn32_predict_pipe_split(struct dc_state *context,
display_e2e_pipe_params_st *pipe_e2e);
|
void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
unsigned int *num_entries,
@ -71,4 +65,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
|
void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
|
int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel);
|
#endif
|
@ -1992,6 +1992,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
dml32_CalculateODMMode(
mode_lib->vba.MaximumPixelsPerLinePerDSCUnit,
mode_lib->vba.HActive[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.Output[k],
mode_lib->vba.ODMUse[k],
mode_lib->vba.MaxDispclk[i],
@ -2014,6 +2015,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
dml32_CalculateODMMode(
mode_lib->vba.MaximumPixelsPerLinePerDSCUnit,
mode_lib->vba.HActive[k],
mode_lib->vba.OutputFormat[k],
mode_lib->vba.Output[k],
mode_lib->vba.ODMUse[k],
mode_lib->vba.MaxDispclk[i],
|
@ -27,6 +27,8 @@
#include "display_mode_vba_32.h"
#include "../display_mode_lib.h"
|
#define DCN32_MAX_FMT_420_BUFFER_WIDTH 4096
|
unsigned int dml32_dscceComputeDelay(
unsigned int bpc,
double BPP,
@ -1179,6 +1181,7 @@ void dml32_CalculateDETBufferSize(
void dml32_CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
enum output_format_class OutFormat,
enum output_encoder_class Output,
enum odm_combine_policy ODMUse,
double StateDispclk,
@ -1253,6 +1256,29 @@ void dml32_CalculateODMMode(
else
*TotalAvailablePipesSupport = false;
}
if (OutFormat == dm_420 && HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH &&
ODMUse != dm_odm_combine_policy_4to1) {
if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 4) {
*ODMMode = dm_odm_combine_mode_disabled;
*NumberOfDPP = 0;
*TotalAvailablePipesSupport = false;
} else if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 2 ||
*ODMMode == dm_odm_combine_mode_4to1) {
*ODMMode = dm_odm_combine_mode_4to1;
*RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineFourToOne;
*NumberOfDPP = 4;
} else {
*ODMMode = dm_odm_combine_mode_2to1;
*RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
*NumberOfDPP = 2;
}
}
if (Output == dm_hdmi && OutFormat == dm_420 &&
HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH) {
*ODMMode = dm_odm_combine_mode_disabled;
*NumberOfDPP = 0;
*TotalAvailablePipesSupport = false;
}
}
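/* Editor's summary of the 4:2:0 width policy added above, with
 * DCN32_MAX_FMT_420_BUFFER_WIDTH = 4096:
 *   HActive <= 4096          -> no ODM forced by the format
 *   4096  < HActive <= 8192  -> 2:1 ODM combine (4:1 if already selected)
 *   8192  < HActive <= 16384 -> 4:1 ODM combine
 *   HActive > 16384          -> unsupported, no DPP assigned
 * HDMI 4:2:0 above 4096 pixels is rejected outright. */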
|
||||
double dml32_CalculateRequiredDispclk(
|
||||
|
@ -216,6 +216,7 @@ void dml32_CalculateDETBufferSize(
void dml32_CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
enum output_format_class OutFormat,
enum output_encoder_class Output,
enum odm_combine_policy ODMUse,
double StateDispclk,

@ -489,6 +489,7 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
if ((int)(dcn3_21_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
&& dc->bb_overrides.urgent_latency_ns) {
dcn3_21_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
dcn3_21_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
}

if ((int)(dcn3_21_soc.dram_clock_change_latency_us * 1000)

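The added line makes the urgent-latency override apply to the pixel-data-only latency as well, so both values stay in step when the override (stored in nanoseconds) is set. A standalone model of the override pattern, with struct and field names invented for illustration:

/* Model of the ns -> us override pattern above; names are invented. */
#include <stdio.h>

struct soc_bb { double urgent_latency_us; };
struct overrides { int urgent_latency_ns; };

static void apply_override(struct soc_bb *bb, const struct overrides *ovr)
{
	/* Compare in integer ns to avoid floating-point equality issues. */
	if ((int)(bb->urgent_latency_us * 1000) != ovr->urgent_latency_ns &&
	    ovr->urgent_latency_ns)
		bb->urgent_latency_us = ovr->urgent_latency_ns / 1000.0;
}

int main(void)
{
	struct soc_bb bb = { .urgent_latency_us = 4.0 };
	struct overrides ovr = { .urgent_latency_ns = 6000 };

	apply_override(&bb, &ovr);
	printf("%.1f us\n", bb.urgent_latency_us); /* 6.0 us */
	return 0;
}
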
@ -510,6 +510,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int htotal;
unsigned int vtotal;
unsigned int vfront_porch;
unsigned int vblank_nom;
unsigned int vactive;
unsigned int hactive;
unsigned int vstartup_start;

@ -597,6 +597,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.HTotal[mode_lib->vba.NumberOfActivePlanes] = dst->htotal;
mode_lib->vba.VTotal[mode_lib->vba.NumberOfActivePlanes] = dst->vtotal;
mode_lib->vba.VFrontPorch[mode_lib->vba.NumberOfActivePlanes] = dst->vfront_porch;
mode_lib->vba.VBlankNom[mode_lib->vba.NumberOfActivePlanes] = dst->vblank_nom;
mode_lib->vba.DCCFractionOfZeroSizeRequestsLuma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_fraction_of_zs_req_luma;
mode_lib->vba.DCCFractionOfZeroSizeRequestsChroma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_fraction_of_zs_req_chroma;
mode_lib->vba.DCCEnable[mode_lib->vba.NumberOfActivePlanes] =

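Together with the new vblank_nom field above, this plumbs the nominal vblank length into the VBA state so DML can reason about it separately from the instantaneous timing (relevant, for example, to variable-refresh modes). How callers derive the value is not shown in this diff; a common derivation, stated here purely as an assumption, is:

/* Assumed derivation for illustration -- not part of this patch. */
pipes[pipe_cnt].pipe.dest.vblank_nom =
	timing->v_total - timing->v_addressable;
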
@ -268,10 +268,18 @@ enum dc_lut_mode {
LUT_RAM_B
};

enum phy_state {
TX_OFF_SYMCLK_OFF,
TX_ON_SYMCLK_ON,
TX_OFF_SYMCLK_ON
enum symclk_state {
SYMCLK_OFF_TX_OFF,
SYMCLK_ON_TX_ON,
SYMCLK_ON_TX_OFF,
};

struct phy_state {
struct {
uint8_t otg : 1;
uint8_t reserved : 7;
} symclk_ref_cnts;
enum symclk_state symclk_state;
};

/**

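The one-hot enum that fused TX and SYMCLK state is split: symclk_state names the clock/TX combination directly, and symclk_ref_cnts lets the OTG hold SYMCLK on after the transmitter is shut off (the SYMCLK_ON_TX_OFF case). A standalone sketch of how the refcount might gate the transition; the helper and its policy are hypothetical, not taken from this diff:

/* Illustrative use of the new phy_state bookkeeping -- not kernel code. */
#include <stdint.h>
#include <stdio.h>

enum symclk_state { SYMCLK_OFF_TX_OFF, SYMCLK_ON_TX_ON, SYMCLK_ON_TX_OFF };

struct phy_state {
	struct {
		uint8_t otg : 1;
		uint8_t reserved : 7;
	} symclk_ref_cnts;
	enum symclk_state symclk_state;
};

/* Hypothetical helper: TX goes down, but SYMCLK must stay up while the
 * OTG still holds a reference on it. */
static void disable_tx(struct phy_state *ps)
{
	ps->symclk_state = ps->symclk_ref_cnts.otg ? SYMCLK_ON_TX_OFF
						   : SYMCLK_OFF_TX_OFF;
}

int main(void)
{
	struct phy_state ps = { .symclk_ref_cnts = { .otg = 1 },
				.symclk_state = SYMCLK_ON_TX_ON };

	disable_tx(&ps);
	printf("state=%d\n", ps.symclk_state); /* SYMCLK_ON_TX_OFF */
	return 0;
}
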
@ -48,6 +48,7 @@ struct dc_phy_addr_space_config;
struct dc_virtual_addr_space_config;
struct dpp;
struct dce_hwseq;
struct link_resource;

struct hw_sequencer_funcs {
void (*hardware_release)(struct dc *dc);
@ -218,6 +219,25 @@ struct hw_sequencer_funcs {

void (*set_pipe)(struct pipe_ctx *pipe_ctx);

void (*enable_dp_link_output)(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings);
void (*enable_tmds_link_output)(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal,
enum clock_source_id clock_source,
enum dc_color_depth color_depth,
uint32_t pixel_clock);
void (*enable_lvds_link_output)(struct dc_link *link,
const struct link_resource *link_res,
enum clock_source_id clock_source,
uint32_t pixel_clock);
void (*disable_link_output)(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal);

void (*get_dcc_en_bits)(struct dc *dc, int *dcc_en_bits);

/* Idle Optimization Related */
@ -245,9 +265,6 @@ struct hw_sequencer_funcs {
struct tg_color *color,
int mpcc_id);

void (*update_phy_state)(struct dc_state *state, struct pipe_ctx *pipe_ctx, enum phy_state target_state);

void (*update_phantom_vp_position)(struct dc *dc,
struct dc_state *context,
struct pipe_ctx *phantom_pipe);

@ -55,9 +55,6 @@ struct link_hwss_ext {
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings);
void (*disable_dp_link_output)(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal);
void (*set_dp_link_test_pattern)(struct dc_link *link,
const struct link_resource *link_res,
struct encoder_set_dp_phy_pattern_param *tp_params);
@ -79,6 +76,9 @@ struct link_hwss {
void (*setup_stream_encoder)(struct pipe_ctx *pipe_ctx);
void (*reset_stream_encoder)(struct pipe_ctx *pipe_ctx);
void (*setup_stream_attribute)(struct pipe_ctx *pipe_ctx);
void (*disable_link_output)(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal);
};
#endif /* __DC_LINK_HWSS_H__ */

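Taken together with the hw_sequencer_funcs hunk above, this moves link-output control out of link_hwss_ext and behind the HW sequencer, so per-ASIC hwseq code can override how a PHY is lit up or shut down, and signal-specific enables (DP, TMDS, LVDS) get distinct hooks. A hedged fragment of the call shape a user of the relocated hook might have; the NULL check mirrors how optional hwss hooks are typically guarded:

/* Fragment, not kernel code: dispatch through the relocated hook. */
if (dc->hwss.disable_link_output)
	dc->hwss.disable_link_output(link, link_res, signal);
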
@ -219,9 +219,15 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
struct dc_state *context,
uint8_t disabled_master_pipe_idx);

void reset_sync_context_for_pipe(const struct dc *dc,
struct dc_state *context,
uint8_t pipe_idx);

uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter);

const struct link_hwss *get_link_hwss(const struct dc_link *link,
const struct link_resource *link_res);

bool is_h_timing_divisible_by_2(struct dc_stream_state *stream);

#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */

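Of the new resource helpers, is_h_timing_divisible_by_2() reads as the gatekeeper for splitting a stream's horizontal timing across two pipes: each horizontal interval must divide evenly or the two halves cannot be programmed identically. A standalone model; the exact set of timing fields the real helper consults is an assumption:

/* Standalone model -- field names follow dc_crtc_timing, but the exact
 * fields checked by the real helper are assumed. */
#include <stdbool.h>

struct h_timing { unsigned int h_total, h_addressable, h_front_porch; };

static bool h_timing_divisible_by_2(const struct h_timing *t)
{
	return !(t->h_total % 2) && !(t->h_addressable % 2) &&
	       !(t->h_front_porch % 2);
}

int main(void)
{
	struct h_timing t = { .h_total = 4400, .h_addressable = 3840,
			      .h_front_porch = 176 };
	return h_timing_divisible_by_2(&t) ? 0 : 1; /* 4K timing: divisible */
}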