diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 3b9d6fa50d17..14c3c1907b95 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -4328,6 +4328,14 @@ bool dc_update_planes_and_stream(struct dc *dc,
 				update_type,
 				context);
 	} else {
+		if (!stream_update &&
+				dc->hwss.is_pipe_topology_transition_seamless &&
+				!dc->hwss.is_pipe_topology_transition_seamless(
+						dc, dc->current_state, context)) {
+
+			DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
+			BREAK_TO_DEBUGGER();
+		}
 		commit_planes_for_stream(
 				dc,
 				srf_updates,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index cae5e1e68c86..018376146d97 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -1619,3 +1619,55 @@ void dcn32_blank_phantom(struct dc *dc,
 	if (tg->funcs->is_tg_enabled(tg))
 		hws->funcs.wait_for_blank_complete(opp);
 }
+
+bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
+		const struct dc_state *cur_ctx,
+		const struct dc_state *new_ctx)
+{
+	int i;
+	const struct pipe_ctx *cur_pipe, *new_pipe;
+	bool is_seamless = true;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		cur_pipe = &cur_ctx->res_ctx.pipe_ctx[i];
+		new_pipe = &new_ctx->res_ctx.pipe_ctx[i];
+
+		if (resource_is_pipe_type(cur_pipe, FREE_PIPE) ||
+				resource_is_pipe_type(new_pipe, FREE_PIPE))
+			/* adding or removing free pipes is always seamless */
+			continue;
+		else if (resource_is_pipe_type(cur_pipe, OTG_MASTER)) {
+			if (resource_is_pipe_type(new_pipe, OTG_MASTER))
+				if (cur_pipe->stream->stream_id == new_pipe->stream->stream_id)
+					/* OTG master with the same stream is seamless */
+					continue;
+		} else if (resource_is_pipe_type(cur_pipe, OPP_HEAD)) {
+			if (resource_is_pipe_type(new_pipe, OPP_HEAD)) {
+				if (cur_pipe->stream_res.tg == new_pipe->stream_res.tg)
+					/*
+					 * OPP heads sharing the same timing
+					 * generator is seamless
+					 */
+					continue;
+			}
+		} else if (resource_is_pipe_type(cur_pipe, DPP_PIPE)) {
+			if (resource_is_pipe_type(new_pipe, DPP_PIPE)) {
+				if (cur_pipe->stream_res.opp == new_pipe->stream_res.opp)
+					/*
+					 * DPP pipes sharing the same OPP head is
+					 * seamless
+					 */
+					continue;
+			}
+		}
+
+		/*
+		 * This pipe's transition doesn't fall under any seamless
+		 * conditions
+		 */
+		is_seamless = false;
+		break;
+	}
+
+	return is_seamless;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
index 616d5219119e..9992e40acd21 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
@@ -120,4 +120,8 @@ void dcn32_blank_phantom(struct dc *dc,
 		int width,
 		int height);
 
+bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
+		const struct dc_state *cur_ctx,
+		const struct dc_state *new_ctx);
+
 #endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index eb4227926006..1edadff39a5e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -116,6 +116,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
 	.update_dsc_pg = dcn32_update_dsc_pg,
 	.apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
 	.blank_phantom = dcn32_blank_phantom,
+	.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
 };
 
 static const struct hwseq_private_funcs dcn32_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 7a702e216e53..66e680902c95 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -401,6 +401,9 @@ struct hw_sequencer_funcs {
 			struct dc_state *context,
 			struct pipe_ctx *phantom_pipe);
 	void (*apply_update_flags_for_phantom)(struct pipe_ctx *phantom_pipe);
+	bool (*is_pipe_topology_transition_seamless)(struct dc *dc,
+			const struct dc_state *cur_ctx,
+			const struct dc_state *new_ctx);
 	void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
 	void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);