sched/uclamp: Make asym_fits_capacity() use util_fits_cpu()

commit a2e7f03ed28fce26c78b985f87913b6ce3accf9d upstream.

Use the new util_fits_cpu() to ensure migration margin and capacity
pressure are taken into account correctly when uclamp is being used;
otherwise we will fail to consider CPUs as fitting in scenarios where
they should.
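
As a rough illustration (a stand-alone user-space model, not the kernel
code: the helper names old_fits()/new_fits() are invented here, the
1280/1024 factor mirrors fits_capacity()'s ~20% margin, and thermal
pressure and capacity-inversion corner cases are deliberately ignored),
the old and new fitness checks compare along these lines:

	#include <stdbool.h>

	/* Old check: task utilization vs. CPU capacity with the fixed
	 * ~20% headroom; uclamp requests are ignored entirely.
	 */
	static bool old_fits(unsigned long util, unsigned long capacity)
	{
		return util * 1280 < capacity * 1024;
	}

	/* New check (simplified): the uclamp performance requests are
	 * honoured too, so a util_min boost can rule out a small CPU
	 * and a util_max cap can let a big task fit on one.
	 */
	static bool new_fits(unsigned long util, unsigned long util_min,
			     unsigned long util_max, unsigned long capacity)
	{
		if (util_min > capacity)	/* requested boost cannot be met */
			return false;
		if (util_max <= capacity)	/* requested cap guarantees a fit */
			return true;
		return util * 1280 < capacity * 1024;
	}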

s/asym_fits_capacity/asym_fits_cpu/ to better reflect what it does now.

Fixes: b4c9c9f15649 ("sched/fair: Prefer prev cpu in asymmetric wakeup path")
Signed-off-by: Qais Yousef <qais.yousef@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220804143609.515789-6-qais.yousef@arm.com
(cherry picked from commit a2e7f03ed28fce26c78b985f87913b6ce3accf9d)
[Conflict in kernel/sched/fair.c due to a different name of the static
key wrapper function and a slightly different if condition block in one
of the asym_fits_cpu() call sites]
Signed-off-by: Qais Yousef (Google) <qyousef@layalina.io>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6422,10 +6422,13 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
 	return best_cpu;
 }
 
-static inline bool asym_fits_capacity(unsigned long task_util, int cpu)
+static inline bool asym_fits_cpu(unsigned long util,
+				 unsigned long util_min,
+				 unsigned long util_max,
+				 int cpu)
 {
 	if (static_branch_unlikely(&sched_asym_cpucapacity))
-		return fits_capacity(task_util, capacity_of(cpu));
+		return util_fits_cpu(util, util_min, util_max, cpu);
 
 	return true;
 }
@@ -6436,7 +6439,7 @@ static inline bool asym_fits_capacity(unsigned long task_util, int cpu)
 static int select_idle_sibling(struct task_struct *p, int prev, int target)
 {
 	struct sched_domain *sd;
-	unsigned long task_util;
+	unsigned long task_util, util_min, util_max;
 	int i, recent_used_cpu;
 
 	/*
@@ -6445,11 +6448,13 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	 */
 	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
 		sync_entity_load_avg(&p->se);
-		task_util = uclamp_task_util(p);
+		task_util = task_util_est(p);
+		util_min = uclamp_eff_value(p, UCLAMP_MIN);
+		util_max = uclamp_eff_value(p, UCLAMP_MAX);
 	}
 
 	if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
-	    asym_fits_capacity(task_util, target))
+	    asym_fits_cpu(task_util, util_min, util_max, target))
 		return target;
 
 	/*
@@ -6457,7 +6462,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	 */
 	if (prev != target && cpus_share_cache(prev, target) &&
 	    (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
-	    asym_fits_capacity(task_util, prev))
+	    asym_fits_cpu(task_util, util_min, util_max, prev))
 		return prev;
 
 	/*
@@ -6472,7 +6477,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	    in_task() &&
 	    prev == smp_processor_id() &&
 	    this_rq()->nr_running <= 1 &&
-	    asym_fits_capacity(task_util, prev)) {
+	    asym_fits_cpu(task_util, util_min, util_max, prev)) {
 		return prev;
 	}
 
@@ -6483,7 +6488,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	    cpus_share_cache(recent_used_cpu, target) &&
 	    (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
 	    cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
-	    asym_fits_capacity(task_util, recent_used_cpu)) {
+	    asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) {
 		/*
 		 * Replace recent_used_cpu with prev as it is a potential
 		 * candidate for the next wake: