@@ -4100,56 +4100,22 @@ static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
 	return hdisplay;
 }
 
-static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc,
-				  u8 active_pipes);
-
-static int intel_crtc_dbuf_weights(struct intel_atomic_state *state,
-				   struct intel_crtc *for_crtc,
-				   unsigned int *weight_start,
-				   unsigned int *weight_end,
-				   unsigned int *weight_total)
+static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
+				    enum pipe for_pipe,
+				    unsigned int *weight_start,
+				    unsigned int *weight_end,
+				    unsigned int *weight_total)
 {
-	const struct intel_dbuf_state *old_dbuf_state =
-		intel_atomic_get_old_dbuf_state(state);
-	struct intel_dbuf_state *new_dbuf_state =
-		intel_atomic_get_new_dbuf_state(state);
-	u8 active_pipes = new_dbuf_state->active_pipes;
-	enum pipe for_pipe = for_crtc->pipe;
-	const struct intel_crtc_state *crtc_state;
-	struct intel_crtc *crtc;
-	u8 dbuf_slice_mask;
-	u8 total_slice_mask;
-	int i, ret;
-
-	/*
-	 * Get allowed DBuf slices for correspondent pipe and platform.
-	 */
-	dbuf_slice_mask = skl_compute_dbuf_slices(for_crtc, active_pipes);
-	total_slice_mask = dbuf_slice_mask;
+	struct drm_i915_private *dev_priv =
+		to_i915(dbuf_state->base.state->base.dev);
+	enum pipe pipe;
 
 	*weight_start = 0;
 	*weight_end = 0;
 	*weight_total = 0;
 
-	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
-		enum pipe pipe = crtc->pipe;
-		unsigned int weight;
-		u8 pipe_dbuf_slice_mask;
-
-		if (!crtc_state->hw.active)
-			continue;
-
-		pipe_dbuf_slice_mask =
-			skl_compute_dbuf_slices(crtc, active_pipes);
-
-		/*
-		 * According to BSpec pipe can share one dbuf slice with another
-		 * pipes or pipe can use multiple dbufs, in both cases we
-		 * account for other pipes only if they have exactly same mask.
-		 * However we need to account how many slices we should enable
-		 * in total.
-		 */
-		total_slice_mask |= pipe_dbuf_slice_mask;
+	for_each_pipe(dev_priv, pipe) {
+		int weight = dbuf_state->weight[pipe];
 
 		/*
 		 * Do not account pipes using other slice sets
@@ -4158,12 +4124,10 @@ static int intel_crtc_dbuf_weights(struct intel_atomic_state *state,
 		 * i.e no partial intersection), so it is enough to check for
 		 * equality for now.
 		 */
-		if (dbuf_slice_mask != pipe_dbuf_slice_mask)
+		if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
			continue;
 
-		weight = intel_crtc_ddb_weight(crtc_state);
 		*weight_total += weight;
-
 		if (pipe < for_pipe) {
 			*weight_start += weight;
 			*weight_end += weight;
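
(Aside, not part of the patch: a minimal standalone sketch of the weight bookkeeping above, with made-up pipe count and weights; in the driver the weights come from intel_crtc_ddb_weight(). Pipes ordered before for_pipe advance both weight_start and weight_end, for_pipe itself advances only weight_end, and every pipe sharing the slice set advances weight_total.)

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical per-pipe weights, e.g. derived from hdisplay */
        unsigned int weight[] = { 1920, 2560, 1280 };
        unsigned int for_pipe = 1; /* pipe B */
        unsigned int weight_start = 0, weight_end = 0, weight_total = 0;

        for (unsigned int pipe = 0; pipe < 3; pipe++) {
            weight_total += weight[pipe];
            if (pipe < for_pipe) {
                weight_start += weight[pipe];
                weight_end += weight[pipe];
            } else if (pipe == for_pipe) {
                weight_end += weight[pipe];
            }
        }

        /* prints: start=1920 end=4480 total=5760 */
        printf("start=%u end=%u total=%u\n",
               weight_start, weight_end, weight_total);
        return 0;
    }
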
@@ -4171,89 +4135,67 @@ static int intel_crtc_dbuf_weights(struct intel_atomic_state *state,
 			*weight_end += weight;
 		}
 	}
-
-	/*
-	 * FIXME: For now we always enable slice S1 as per
-	 * the Bspec display initialization sequence.
-	 */
-	new_dbuf_state->enabled_slices = total_slice_mask | BIT(DBUF_S1);
-
-	if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
-		ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
 }
 
 static int
-skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
-				   struct intel_crtc_state *crtc_state,
-				   const u64 total_data_rate,
-				   struct skl_ddb_entry *alloc, /* out */
-				   int *num_active /* out */)
+skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
 {
-	struct intel_atomic_state *state =
-		to_intel_atomic_state(crtc_state->uapi.state);
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	unsigned int weight_start, weight_end, weight_total;
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	unsigned int weight_total, weight_start, weight_end;
 	const struct intel_dbuf_state *old_dbuf_state =
 		intel_atomic_get_old_dbuf_state(state);
 	struct intel_dbuf_state *new_dbuf_state =
 		intel_atomic_get_new_dbuf_state(state);
-	u8 active_pipes = new_dbuf_state->active_pipes;
+	struct intel_crtc_state *crtc_state;
 	struct skl_ddb_entry ddb_slices;
+	enum pipe pipe = crtc->pipe;
 	u32 ddb_range_size;
 	u32 dbuf_slice_mask;
 	u32 start, end;
 	int ret;
 
-	*num_active = hweight8(active_pipes);
-
-	if (!crtc_state->hw.active) {
-		alloc->start = 0;
-		alloc->end = 0;
-		crtc_state->wm.skl.ddb = *alloc;
-		return 0;
+	if (new_dbuf_state->weight[pipe] == 0) {
+		new_dbuf_state->ddb[pipe].start = 0;
+		new_dbuf_state->ddb[pipe].end = 0;
+		goto out;
 	}
 
-	/*
-	 * If the state doesn't change the active CRTC's or there is no
-	 * modeset request, then there's no need to recalculate;
-	 * the existing pipe allocation limits should remain unchanged.
-	 * Note that we're safe from racing commits since any racing commit
-	 * that changes the active CRTC list or do modeset would need to
-	 * grab _all_ crtc locks, including the one we currently hold.
-	 */
-	if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes &&
-	    !dev_priv->wm.distrust_bios_wm)
-		return 0;
-
-	/*
-	 * Get allowed DBuf slices for correspondent pipe and platform.
-	 */
-	dbuf_slice_mask = skl_compute_dbuf_slices(crtc, active_pipes);
+	dbuf_slice_mask = new_dbuf_state->slices[pipe];
 
 	skl_ddb_entry_for_slices(dev_priv, dbuf_slice_mask, &ddb_slices);
 	ddb_range_size = skl_ddb_entry_size(&ddb_slices);
 
-	ret = intel_crtc_dbuf_weights(state, crtc,
-				      &weight_start, &weight_end, &weight_total);
-	if (ret)
-		return ret;
+	intel_crtc_dbuf_weights(new_dbuf_state, pipe,
+				&weight_start, &weight_end, &weight_total);
 
 	start = ddb_range_size * weight_start / weight_total;
 	end = ddb_range_size * weight_end / weight_total;
 
-	alloc->start = ddb_slices.start + start;
-	alloc->end = ddb_slices.start + end;
-	crtc_state->wm.skl.ddb = *alloc;
+	new_dbuf_state->ddb[pipe].start = ddb_slices.start + start;
+	new_dbuf_state->ddb[pipe].end = ddb_slices.start + end;
+
+out:
+	if (skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
+				&new_dbuf_state->ddb[pipe]))
+		return 0;
+
+	ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+	if (ret)
+		return ret;
+
+	crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
+	crtc_state->wm.skl.ddb = new_dbuf_state->ddb[pipe];
 
 	drm_dbg_kms(&dev_priv->drm,
-		    "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x\n",
+		    "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
 		    crtc->base.base.id, crtc->base.name,
-		    dbuf_slice_mask, alloc->start, alloc->end, active_pipes);
+		    old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
+		    old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
+		    new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
+		    old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
 
 	return 0;
 }
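
(Aside, not part of the patch: continuing the sketch above, the [weight_start, weight_end) interval is scaled onto the ddb range covered by the pipe's dbuf slices; the 1024-block range and zero slice offset below are made up:)

    #include <stdio.h>

    int main(void)
    {
        unsigned int ddb_range_size = 1024; /* hypothetical slice range */
        unsigned int slices_start = 0;      /* stands in for ddb_slices.start */
        unsigned int weight_start = 1920, weight_end = 4480,
                     weight_total = 5760;   /* from the sketch above */

        unsigned int start = ddb_range_size * weight_start / weight_total;
        unsigned int end = ddb_range_size * weight_end / weight_total;

        /* prints: ddb (341 - 796) */
        printf("ddb (%u - %u)\n", slices_start + start, slices_start + end);
        return 0;
    }
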
@@ -4819,58 +4761,30 @@ skl_plane_wm_level(const struct intel_crtc_state *crtc_state,
 }
 
 static int
-skl_allocate_pipe_ddb(struct intel_atomic_state *state,
-		      struct intel_crtc *crtc)
+skl_allocate_plane_ddb(struct intel_atomic_state *state,
+		       struct intel_crtc *crtc)
 {
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_crtc_state *crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_dbuf_state *dbuf_state =
+	const struct intel_dbuf_state *dbuf_state =
 		intel_atomic_get_new_dbuf_state(state);
-	struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
+	const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
+	int num_active = hweight8(dbuf_state->active_pipes);
 	u16 alloc_size, start = 0;
 	u16 total[I915_MAX_PLANES] = {};
 	u16 uv_total[I915_MAX_PLANES] = {};
 	u64 total_data_rate;
 	enum plane_id plane_id;
-	int num_active;
 	u32 blocks;
 	int level;
 	int ret;
 
 	/* Clear the partitioning for disabled planes. */
 	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
 	memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
 
-	if (!crtc_state->hw.active) {
-		struct intel_atomic_state *state =
-			to_intel_atomic_state(crtc_state->uapi.state);
-		struct intel_dbuf_state *new_dbuf_state =
-			intel_atomic_get_new_dbuf_state(state);
-		const struct intel_dbuf_state *old_dbuf_state =
-			intel_atomic_get_old_dbuf_state(state);
-
-		/*
-		 * FIXME hack to make sure we compute this sensibly when
-		 * turning off all the pipes. Otherwise we leave it at
-		 * whatever we had previously, and then runtime PM will
-		 * mess it up by turning off all but S1. Remove this
-		 * once the dbuf state computation flow becomes sane.
-		 */
-		if (new_dbuf_state->active_pipes == 0) {
-			new_dbuf_state->enabled_slices = BIT(DBUF_S1);
-
-			if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
-				ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
-				if (ret)
-					return ret;
-			}
-		}
-
-		alloc->start = alloc->end = 0;
-		crtc_state->wm.skl.ddb = *alloc;
+	if (!crtc_state->hw.active)
 		return 0;
-	}
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		total_data_rate =
@@ -4879,12 +4793,6 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
 		total_data_rate =
 			skl_get_total_relative_data_rate(state, crtc);
 
-	ret = skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state,
-						 total_data_rate,
-						 alloc, &num_active);
-	if (ret)
-		return ret;
-
 	alloc_size = skl_ddb_entry_size(alloc);
 	if (alloc_size == 0)
 		return 0;
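
(Aside, not part of the patch: a standalone sketch of what skl_ddb_entry_size() reduces the precomputed entry to, i.e. the number of blocks the plane loop below distributes; the struct is a simplified stand-in for the driver's skl_ddb_entry, with the values from the earlier sketch:)

    #include <stdio.h>

    struct ddb_entry { unsigned short start, end; }; /* simplified */

    static unsigned int ddb_entry_size(const struct ddb_entry *e)
    {
        return e->end - e->start; /* [start, end) in blocks */
    }

    int main(void)
    {
        struct ddb_entry alloc = { .start = 341, .end = 796 };

        /* prints: alloc_size=455 */
        printf("alloc_size=%u\n", ddb_entry_size(&alloc));
        return 0;
    }
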
@@ -5799,20 +5707,106 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
 	return 0;
 }
 
+static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(dbuf_state->base.state->base.dev);
+	u8 enabled_slices;
+	enum pipe pipe;
+
+	/*
+	 * FIXME: For now we always enable slice S1 as per
+	 * the Bspec display initialization sequence.
+	 */
+	enabled_slices = BIT(DBUF_S1);
+
+	for_each_pipe(dev_priv, pipe)
+		enabled_slices |= dbuf_state->slices[pipe];
+
+	return enabled_slices;
+}
+
 static int
 skl_compute_ddb(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	const struct intel_dbuf_state *old_dbuf_state;
-	const struct intel_dbuf_state *new_dbuf_state;
+	struct intel_dbuf_state *new_dbuf_state = NULL;
 	const struct intel_crtc_state *old_crtc_state;
 	struct intel_crtc_state *new_crtc_state;
 	struct intel_crtc *crtc;
 	int ret, i;
 
+	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+		new_dbuf_state = intel_atomic_get_dbuf_state(state);
+		if (IS_ERR(new_dbuf_state))
+			return PTR_ERR(new_dbuf_state);
+
+		old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+		break;
+	}
+
+	if (!new_dbuf_state)
+		return 0;
+
+	new_dbuf_state->active_pipes =
+		intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
+
+	if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
+		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+		if (ret)
+			return ret;
+	}
+
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
+		enum pipe pipe = crtc->pipe;
+
+		new_dbuf_state->slices[pipe] =
+			skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes);
+
+		if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
+			continue;
+
+		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+		if (ret)
+			return ret;
+	}
+
+	new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
+
+	if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices) {
+		ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
+		if (ret)
+			return ret;
+
+		drm_dbg_kms(&dev_priv->drm,
+			    "Enabled dbuf slices 0x%x -> 0x%x (out of %d dbuf slices)\n",
+			    old_dbuf_state->enabled_slices,
+			    new_dbuf_state->enabled_slices,
+			    INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
+	}
+
+	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+		enum pipe pipe = crtc->pipe;
+
+		new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
+
+		if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
+			continue;
+
+		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+		if (ret)
+			return ret;
+	}
+
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
+		ret = skl_crtc_allocate_ddb(state, crtc);
+		if (ret)
+			return ret;
+	}
+
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		ret = skl_allocate_pipe_ddb(state, crtc);
+		ret = skl_allocate_plane_ddb(state, crtc);
 		if (ret)
 			return ret;
 
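
(Aside, not part of the patch: a standalone sketch of the union computed by intel_dbuf_enabled_slices() above, i.e. OR-ing the per-pipe slice masks with S1 always forced on; the two-slice, three-pipe setup is made up:)

    #include <stdio.h>

    enum dbuf_slice { DBUF_S1, DBUF_S2 };
    #define BIT(x) (1u << (x))

    int main(void)
    {
        /* hypothetical per-pipe slice masks: A on S1, B on S2, C inactive */
        unsigned char slices[] = { BIT(DBUF_S1), BIT(DBUF_S2), 0 };
        unsigned char enabled = BIT(DBUF_S1); /* S1 always enabled */

        for (int pipe = 0; pipe < 3; pipe++)
            enabled |= slices[pipe];

        /* prints: enabled_slices=0x3 */
        printf("enabled_slices=0x%x\n", enabled);
        return 0;
    }
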
@@ -5822,17 +5816,6 @@ skl_compute_ddb(struct intel_atomic_state *state)
 			return ret;
 	}
 
-	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
-	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
-
-	if (new_dbuf_state &&
-	    new_dbuf_state->enabled_slices != old_dbuf_state->enabled_slices)
-		drm_dbg_kms(&dev_priv->drm,
-			    "Enabled dbuf slices 0x%x -> 0x%x (out of %d dbuf slices)\n",
-			    old_dbuf_state->enabled_slices,
-			    new_dbuf_state->enabled_slices,
-			    INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
-
 	return 0;
 }
 
@@ -5968,83 +5951,6 @@ skl_print_wm_changes(struct intel_atomic_state *state)
 	}
 }
 
-static int intel_add_affected_pipes(struct intel_atomic_state *state,
-				    u8 pipe_mask)
-{
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct intel_crtc *crtc;
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		struct intel_crtc_state *crtc_state;
-
-		if ((pipe_mask & BIT(crtc->pipe)) == 0)
-			continue;
-
-		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
-		if (IS_ERR(crtc_state))
-			return PTR_ERR(crtc_state);
-	}
-
-	return 0;
-}
-
-static int
-skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
-{
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct intel_crtc_state *crtc_state;
-	struct intel_crtc *crtc;
-	int i, ret;
-
-	if (dev_priv->wm.distrust_bios_wm) {
-		/*
-		 * skl_ddb_get_pipe_allocation_limits() currently requires
-		 * all active pipes to be included in the state so that
-		 * it can redistribute the dbuf among them, and it really
-		 * wants to recompute things when distrust_bios_wm is set
-		 * so we add all the pipes to the state.
-		 */
-		ret = intel_add_affected_pipes(state, ~0);
-		if (ret)
-			return ret;
-	}
-
-	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
-		struct intel_dbuf_state *new_dbuf_state;
-		const struct intel_dbuf_state *old_dbuf_state;
-
-		new_dbuf_state = intel_atomic_get_dbuf_state(state);
-		if (IS_ERR(new_dbuf_state))
-			return PTR_ERR(new_dbuf_state);
-
-		old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
-
-		new_dbuf_state->active_pipes =
-			intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
-
-		if (old_dbuf_state->active_pipes == new_dbuf_state->active_pipes)
-			break;
-
-		ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
-		if (ret)
-			return ret;
-
-		/*
-		 * skl_ddb_get_pipe_allocation_limits() currently requires
-		 * all active pipes to be included in the state so that
-		 * it can redistribute the dbuf among them.
-		 */
-		ret = intel_add_affected_pipes(state,
-					       new_dbuf_state->active_pipes);
-		if (ret)
-			return ret;
-
-		break;
-	}
-
-	return 0;
-}
-
 /*
  * To make sure the cursor watermark registers are always consistent
  * with our computed state the following scenario needs special
@@ -6112,15 +6018,6 @@ skl_compute_wm(struct intel_atomic_state *state)
 	struct intel_crtc_state *new_crtc_state;
 	int ret, i;
 
-	ret = skl_ddb_add_affected_pipes(state);
-	if (ret)
-		return ret;
-
-	/*
-	 * Calculate WM's for all pipes that are part of this transaction.
-	 * Note that skl_ddb_add_affected_pipes may have added more CRTC's that
-	 * weren't otherwise being modified if pipe allocations had to change.
-	 */
 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
 		ret = skl_build_pipe_wm(state, crtc);
 		if (ret)
@@ -6288,11 +6185,6 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
 		skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
 		crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
 	}
-
-	if (dev_priv->active_pipes) {
-		/* Fully recompute DDB on first atomic commit */
-		dev_priv->wm.distrust_bios_wm = true;
-	}
 }
 
 static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)