sched: Unify runtime accounting across classes
All classes use sched_entity::exec_start to track runtime and have copies
of the exact same code around to compute runtime. Collapse all that.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Daniel Bristot de Oliveira <bristot@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Phil Auld <pauld@redhat.com>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Link: https://lkml.kernel.org/r/54d148a144f26d9559698c4dd82d8859038a7380.1699095159.git.bristot@kernel.org
commit 5d69eca542
parent ee4373dc90
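As a reader's aid (not part of the commit): the sketch below is a standalone,
simplified model of the structure the patch introduces. The names
update_curr_se() and update_curr_common() mirror the diff; the rq layout, the
clock source, and the rt-like caller are invented stand-ins, and the real
helpers additionally handle schedstats, tracing, and group/cgroup time
accounting.

#include <stdio.h>
#include <stdint.h>

typedef int64_t s64;
typedef uint64_t u64;

struct sched_entity {
        u64 exec_start;        /* timestamp of the last accounting update */
        u64 sum_exec_runtime;  /* total runtime banked so far */
};

struct rq {
        u64 clock_task;              /* stand-in for rq_clock_task(rq) */
        struct sched_entity curr_se; /* stand-in for rq->curr->se */
};

/* Common core: compute and bank the runtime since the last update. */
static s64 update_curr_se(struct rq *rq, struct sched_entity *curr)
{
        u64 now = rq->clock_task;
        s64 delta_exec = (s64)(now - curr->exec_start);

        if (delta_exec <= 0)
                return delta_exec;   /* clock behind exec_start: nothing to account */

        curr->exec_start = now;
        curr->sum_exec_runtime += (u64)delta_exec;
        return delta_exec;
}

/* Shared entry point for the non-CFS classes (rt, deadline, stop). */
static s64 update_curr_common(struct rq *rq)
{
        /* the real helper also does tracing and group/cgroup accounting */
        return update_curr_se(rq, &rq->curr_se);
}

/* A class hook now only layers its own bookkeeping on top. */
static void update_curr_rt_like(struct rq *rq)
{
        s64 delta_exec = update_curr_common(rq);

        if (delta_exec <= 0)
                return;
        printf("charged %lld ns against the class-specific budget\n",
               (long long)delta_exec);
}

int main(void)
{
        struct rq rq = { .clock_task = 1000, .curr_se = { .exec_start = 400 } };

        update_curr_rt_like(&rq);   /* accounts 600 ns */
        rq.clock_task = 1500;
        update_curr_rt_like(&rq);   /* accounts another 500 ns */
        printf("sum_exec_runtime = %llu ns\n",
               (unsigned long long)rq.curr_se.sum_exec_runtime);
        return 0;
}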
include/linux/sched.h
@@ -523,7 +523,7 @@ struct sched_statistics {
         u64                     block_max;
         s64                     sum_block_runtime;
 
-        u64                     exec_max;
+        s64                     exec_max;
         u64                     slice_max;
 
         u64                     nr_migrations_cold;
kernel/sched/deadline.c
@@ -1275,9 +1275,8 @@ static void update_curr_dl(struct rq *rq)
 {
         struct task_struct *curr = rq->curr;
         struct sched_dl_entity *dl_se = &curr->dl;
-        u64 delta_exec, scaled_delta_exec;
+        s64 delta_exec, scaled_delta_exec;
         int cpu = cpu_of(rq);
-        u64 now;
 
         if (!dl_task(curr) || !on_dl_rq(dl_se))
                 return;
@@ -1290,21 +1289,13 @@ static void update_curr_dl(struct rq *rq)
          * natural solution, but the full ramifications of this
          * approach need further study.
          */
-        now = rq_clock_task(rq);
-        delta_exec = now - curr->se.exec_start;
-        if (unlikely((s64)delta_exec <= 0)) {
+        delta_exec = update_curr_common(rq);
+        if (unlikely(delta_exec <= 0)) {
                 if (unlikely(dl_se->dl_yielded))
                         goto throttle;
                 return;
         }
 
-        schedstat_set(curr->stats.exec_max,
-                      max(curr->stats.exec_max, delta_exec));
-
-        trace_sched_stat_runtime(curr, delta_exec, 0);
-
-        update_current_exec_runtime(curr, now, delta_exec);
-
         if (dl_entity_is_special(dl_se))
                 return;
 
kernel/sched/fair.c
@@ -1103,23 +1103,17 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
 }
 #endif /* CONFIG_SMP */
 
-/*
- * Update the current task's runtime statistics.
- */
-static void update_curr(struct cfs_rq *cfs_rq)
+static s64 update_curr_se(struct rq *rq, struct sched_entity *curr)
 {
-        struct sched_entity *curr = cfs_rq->curr;
-        u64 now = rq_clock_task(rq_of(cfs_rq));
-        u64 delta_exec;
-
-        if (unlikely(!curr))
-                return;
+        u64 now = rq_clock_task(rq);
+        s64 delta_exec;
 
         delta_exec = now - curr->exec_start;
-        if (unlikely((s64)delta_exec <= 0))
-                return;
+        if (unlikely(delta_exec <= 0))
+                return delta_exec;
 
         curr->exec_start = now;
+        curr->sum_exec_runtime += delta_exec;
 
         if (schedstat_enabled()) {
                 struct sched_statistics *stats;
@@ -1129,8 +1123,43 @@ static void update_curr(struct cfs_rq *cfs_rq)
                         max(delta_exec, stats->exec_max));
         }
 
-        curr->sum_exec_runtime += delta_exec;
-        schedstat_add(cfs_rq->exec_clock, delta_exec);
+        return delta_exec;
+}
+
+/*
+ * Used by other classes to account runtime.
+ */
+s64 update_curr_common(struct rq *rq)
+{
+        struct task_struct *curr = rq->curr;
+        s64 delta_exec;
+
+        delta_exec = update_curr_se(rq, &curr->se);
+        if (unlikely(delta_exec <= 0))
+                return delta_exec;
+
+        trace_sched_stat_runtime(curr, delta_exec, 0);
+
+        account_group_exec_runtime(curr, delta_exec);
+        cgroup_account_cputime(curr, delta_exec);
+
+        return delta_exec;
+}
+
+/*
+ * Update the current task's runtime statistics.
+ */
+static void update_curr(struct cfs_rq *cfs_rq)
+{
+        struct sched_entity *curr = cfs_rq->curr;
+        s64 delta_exec;
+
+        if (unlikely(!curr))
+                return;
+
+        delta_exec = update_curr_se(rq_of(cfs_rq), curr);
+        if (unlikely(delta_exec <= 0))
+                return;
 
         curr->vruntime += calc_delta_fair(delta_exec, curr);
         update_deadline(cfs_rq, curr);
kernel/sched/rt.c
@@ -1002,24 +1002,15 @@ static void update_curr_rt(struct rq *rq)
 {
         struct task_struct *curr = rq->curr;
         struct sched_rt_entity *rt_se = &curr->rt;
-        u64 delta_exec;
-        u64 now;
+        s64 delta_exec;
 
         if (curr->sched_class != &rt_sched_class)
                 return;
 
-        now = rq_clock_task(rq);
-        delta_exec = now - curr->se.exec_start;
-        if (unlikely((s64)delta_exec <= 0))
+        delta_exec = update_curr_common(rq);
+        if (unlikely(delta_exec <= 0))
                 return;
 
-        schedstat_set(curr->stats.exec_max,
-                      max(curr->stats.exec_max, delta_exec));
-
-        trace_sched_stat_runtime(curr, delta_exec, 0);
-
-        update_current_exec_runtime(curr, now, delta_exec);
-
         if (!rt_bandwidth_enabled())
                 return;
 
kernel/sched/sched.h
@@ -2212,6 +2212,8 @@ struct affinity_context {
         unsigned int flags;
 };
 
+extern s64 update_curr_common(struct rq *rq);
+
 struct sched_class {
 
 #ifdef CONFIG_UCLAMP_TASK
@@ -3262,16 +3264,6 @@ extern int sched_dynamic_mode(const char *str);
 extern void sched_dynamic_update(int mode);
 #endif
 
-static inline void update_current_exec_runtime(struct task_struct *curr,
-                                               u64 now, u64 delta_exec)
-{
-        curr->se.sum_exec_runtime += delta_exec;
-        account_group_exec_runtime(curr, delta_exec);
-
-        curr->se.exec_start = now;
-        cgroup_account_cputime(curr, delta_exec);
-}
-
 #ifdef CONFIG_SCHED_MM_CID
 
 #define SCHED_MM_CID_PERIOD_NS (100ULL * 1000000) /* 100ms */
kernel/sched/stop_task.c
@@ -70,18 +70,7 @@ static void yield_task_stop(struct rq *rq)
 
 static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
 {
-        struct task_struct *curr = rq->curr;
-        u64 now, delta_exec;
-
-        now = rq_clock_task(rq);
-        delta_exec = now - curr->se.exec_start;
-        if (unlikely((s64)delta_exec < 0))
-                delta_exec = 0;
-
-        schedstat_set(curr->stats.exec_max,
-                      max(curr->stats.exec_max, delta_exec));
-
-        update_current_exec_runtime(curr, now, delta_exec);
+        update_curr_common(rq);
 }
 
 /*
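A note on the u64 -> s64 type changes above (again an illustration, not part
of the commit): the old code computed an unsigned delta and relied on an
(s64) cast to detect a clock read that is momentarily behind exec_start;
carrying delta_exec as s64 end to end keeps that behaviour without the casts.
A minimal standalone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t exec_start = 1000, now = 900;          /* clock briefly behind */

        uint64_t u_delta = now - exec_start;            /* wraps to a huge value */
        int64_t  s_delta = (int64_t)(now - exec_start); /* -100, caught by <= 0 */

        printf("unsigned delta: %llu\n", (unsigned long long)u_delta);
        printf("signed delta:   %lld\n", (long long)s_delta);
        return 0;
}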