sched/nohz: Remove the 1 Hz tick code
Now that the 1Hz tick is offloaded to workqueues, we can safely remove
the residual code that used to handle it locally.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Wanpeng Li <kernellwp@gmail.com>
Link: http://lkml.kernel.org/r/1519186649-3242-7-git-send-email-frederic@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
committed by Ingo Molnar
parent d84b31313e
commit dcdedb2415
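For context: the parent commit (d84b31313e) is the one that moved this periodic work onto a per-CPU delayed workqueue item, which is why the "struct tick_work" context lines survive in the diff below. The following is a minimal sketch of that self-rearming offload pattern, assuming kernel context; the function body and queueing details are illustrative simplifications of the real sched_tick_remote() machinery, not the verbatim kernel code:

    /*
     * Minimal sketch, assuming kernel context: a delayed work item that
     * re-arms itself every HZ jiffies (once per second) to run the
     * residual tick on behalf of a nohz_full CPU. Simplified stand-in
     * for the real remote-tick machinery, not the actual code.
     */
    struct tick_work {
            int                     cpu;
            struct delayed_work     work;
    };

    static void sched_tick_remote(struct work_struct *work)
    {
            struct delayed_work *dwork = to_delayed_work(work);
            struct tick_work *twork = container_of(dwork, struct tick_work, work);

            /* ... perform the per-second scheduler housekeeping for twork->cpu ... */

            /* Re-arm so the remote tick keeps firing roughly once per second. */
            queue_delayed_work(system_unbound_wq, dwork, HZ);
    }

Because the workqueue now guarantees this once-per-second upkeep, the per-runqueue bookkeeping (last_sched_tick, rq_last_tick_reset()) and the deferment cap (scheduler_tick_max_deferment()) removed below are no longer needed.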
include/linux/sched/nohz.h
@@ -37,8 +37,4 @@ extern void wake_up_nohz_cpu(int cpu);
 static inline void wake_up_nohz_cpu(int cpu) { }
 #endif
 
-#ifdef CONFIG_NO_HZ_FULL
-extern u64 scheduler_tick_max_deferment(void);
-#endif
-
 #endif /* _LINUX_SCHED_NOHZ_H */
kernel/sched/core.c
@@ -3096,35 +3096,9 @@ void scheduler_tick(void)
 	rq->idle_balance = idle_cpu(cpu);
 	trigger_load_balance(rq);
 #endif
-	rq_last_tick_reset(rq);
 }
 
 #ifdef CONFIG_NO_HZ_FULL
-/**
- * scheduler_tick_max_deferment
- *
- * Keep at least one tick per second when a single
- * active task is running because the scheduler doesn't
- * yet completely support full dynticks environment.
- *
- * This makes sure that uptime, CFS vruntime, load
- * balancing, etc... continue to move forward, even
- * with a very low granularity.
- *
- * Return: Maximum deferment in nanoseconds.
- */
-u64 scheduler_tick_max_deferment(void)
-{
-	struct rq *rq = this_rq();
-	unsigned long next, now = READ_ONCE(jiffies);
-
-	next = rq->last_sched_tick + HZ;
-
-	if (time_before_eq(next, now))
-		return 0;
-
-	return jiffies_to_nsecs(next - now);
-}
 
 struct tick_work {
 	int cpu;
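The helper removed above capped how long the tick could be deferred: at most one second (HZ jiffies) past the last tick seen on this runqueue. To make the jiffies arithmetic concrete, here is a small standalone C program that mirrors the same computation with hypothetical values; jiffies_to_nsecs() is simplified to a plain HZ conversion, and the kernel's wraparound-safe time_before_eq() is reduced to <=:

    #include <stdio.h>
    #include <stdint.h>

    #define HZ 1000                         /* assume a 1000 Hz jiffy clock */

    /* Simplified stand-in for the kernel's jiffies_to_nsecs(). */
    static uint64_t jiffies_to_nsecs(unsigned long j)
    {
            return (uint64_t)j * (1000000000ULL / HZ);
    }

    int main(void)
    {
            unsigned long now = 10250;              /* hypothetical current jiffies */
            unsigned long last_sched_tick = 10000;  /* last tick 250 jiffies ago */
            unsigned long next = last_sched_tick + HZ;
            uint64_t max_defer_ns;

            /* Same logic as the removed scheduler_tick_max_deferment();
             * the kernel uses time_before_eq() to survive jiffies wraparound. */
            if (next <= now)
                    max_defer_ns = 0;               /* already overdue: tick now */
            else
                    max_defer_ns = jiffies_to_nsecs(next - now);

            /* 750 remaining jiffies -> 750000000 ns (750 ms). */
            printf("max deferment: %llu ns\n", (unsigned long long)max_defer_ns);
            return 0;
    }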
@@ -6116,9 +6090,6 @@ void __init sched_init(void)
 		rq->last_load_update_tick = jiffies;
 		rq->nohz_flags = 0;
 #endif
-#ifdef CONFIG_NO_HZ_FULL
-		rq->last_sched_tick = 0;
-#endif
 #endif /* CONFIG_SMP */
 		hrtick_rq_init(rq);
 		atomic_set(&rq->nr_iowait, 0);
kernel/sched/idle.c
@@ -48,7 +48,6 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 
 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
-	rq_last_tick_reset(rq);
 }
 
 /*
kernel/sched/sched.h
@@ -727,9 +727,7 @@ struct rq {
 #endif /* CONFIG_SMP */
 	unsigned long nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */
-#ifdef CONFIG_NO_HZ_FULL
-	unsigned long last_sched_tick;
-#endif
+
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
@@ -1626,13 +1624,6 @@ static inline void sub_nr_running(struct rq *rq, unsigned count)
 	sched_update_tick_dependency(rq);
 }
 
-static inline void rq_last_tick_reset(struct rq *rq)
-{
-#ifdef CONFIG_NO_HZ_FULL
-	rq->last_sched_tick = jiffies;
-#endif
-}
-
 extern void update_rq_clock(struct rq *rq);
 
 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
kernel/time/tick-sched.c
@@ -748,12 +748,6 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 		delta = KTIME_MAX;
 	}
 
-#ifdef CONFIG_NO_HZ_FULL
-	/* Limit the tick delta to the maximum scheduler deferment */
-	if (!ts->inidle)
-		delta = min(delta, scheduler_tick_max_deferment());
-#endif
-
 	/* Calculate the next expiry time */
 	if (delta < (KTIME_MAX - basemono))
 		expires = basemono + delta;
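With the clamp gone, delta can legitimately remain KTIME_MAX on a busy nohz_full CPU, meaning the tick is stopped outright rather than deferred by at most a second. The overflow guard in the surviving context lines is then what keeps basemono + delta from wrapping. A tiny standalone demonstration of that guard, with KTIME_MAX modeled as INT64_MAX and hypothetical values:

    #include <stdio.h>
    #include <stdint.h>

    #define KTIME_MAX INT64_MAX

    int main(void)
    {
            int64_t basemono = 123456789;   /* hypothetical base time in ns */
            int64_t delta = KTIME_MAX;      /* no tick needed: defer forever */
            int64_t expires = KTIME_MAX;    /* default: no expiry */

            /* Same guard as the remaining code: add only when it cannot overflow. */
            if (delta < (KTIME_MAX - basemono))
                    expires = basemono + delta;

            /* Here the guard fails, so expires stays KTIME_MAX: tick fully stopped. */
            printf("expires = %lld\n", (long long)expires);
            return 0;
    }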