Merge branch 'pm-cpuidle'
* pm-cpuidle:
  cpuidle: Add a kerneldoc comment to cpuidle_use_deepest_state()
  cpuidle: fix improper return value on error
  intel_idle: Convert to hotplug state machine
  intel_idle: Remove superfluous SMP fuction call
  MAINTAINERS: Add Jacob Pan as a new intel_idle maintainer
  MAINTAINERS: Add bug tracking system location entries for cpuidle
  x86/intel_idle: Add Knights Mill CPUID
  x86/intel_idle: Add CPU model 0x4a (Atom Z34xx series)
  thermal/intel_powerclamp: stop sched tick in forced idle
  thermal/intel_powerclamp: Convert to CPU hotplug state
  thermal/intel_powerclamp: Convert the kthread to kthread worker API
  thermal/intel_powerclamp: Remove duplicated code that starts the kthread
  sched/idle: Add support for tasks that inject idle
  cpuidle: Allow enforcing deepest idle state selection
  cpuidle/powernv: staticise powernv_idle_driver
  cpuidle: dt: assign ->enter_freeze to same as ->enter callback function
  cpuidle: governors: Remove remaining old module code
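The headline feature in this merge is idle injection. On the scheduler side, shown in the hunks below, one iteration of the idle loop is split out into do_idle(), and a new play_idle() helper lets a pinned SCHED_FIFO kthread run that real idle loop for a bounded window; on the cpuidle side, such a task can force selection of the deepest available idle state via cpuidle_use_deepest_state(). thermal/intel_powerclamp is converted to be the first user.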
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1540,7 +1540,7 @@ static __latent_entropy struct task_struct *copy_process(
 		goto bad_fork_cleanup_count;
 
 	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
-	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
+	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
 	p->flags |= PF_FORKNOEXEC;
 	INIT_LIST_HEAD(&p->children);
 	INIT_LIST_HEAD(&p->sibling);
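The fork.c hunk above keeps PF_IDLE from leaking across fork(): with this series, being "idle" becomes a per-task property that play_idle() (further down this page) sets temporarily, so a child of an idle-injecting task must start life as an ordinary task.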
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5279,6 +5279,7 @@ void init_idle(struct task_struct *idle, int cpu)
 	__sched_fork(0, idle);
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
+	idle->flags |= PF_IDLE;
 
 	kasan_unpoison_task_stack(idle);
 
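Conversely, init_idle() now tags the real per-CPU idle threads with PF_IDLE, which is what lets is_idle_task() be redefined purely in terms of the flag elsewhere in the sched/idle patch. A sketch of that redefinition, shown here for context only (it belongs to the series, not to the hunks on this page):

/*
 * <linux/sched.h>: with this series, an idle task is any task running
 * with PF_IDLE set, whether a per-CPU idle thread or a task currently
 * inside play_idle(). (Sketch of the upstream change, for context.)
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}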
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -164,11 +164,14 @@ static void cpuidle_idle_call(void)
 	 * timekeeping to prevent timer interrupts from kicking us out of idle
 	 * until a proper wakeup interrupt happens.
 	 */
-	if (idle_should_freeze()) {
-		entered_state = cpuidle_enter_freeze(drv, dev);
-		if (entered_state > 0) {
-			local_irq_enable();
-			goto exit_idle;
+
+	if (idle_should_freeze() || dev->use_deepest_state) {
+		if (idle_should_freeze()) {
+			entered_state = cpuidle_enter_freeze(drv, dev);
+			if (entered_state > 0) {
+				local_irq_enable();
+				goto exit_idle;
+			}
 		}
 
 		next_state = cpuidle_find_deepest_state(drv, dev);
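The new dev->use_deepest_state check above is toggled through cpuidle_use_deepest_state() in drivers/cpuidle/cpuidle.c, added by "cpuidle: Allow enforcing deepest idle state selection". A sketch of that setter, for context (part of the series, not of the hunks on this page):

/*
 * cpuidle_use_deepest_state - Enable/disable the "deepest idle" mode.
 * @enable: whether to enable or disable the feature.
 *
 * While enabled, cpuidle_idle_call() bypasses the governor and picks
 * the deepest state, as the hunk above shows. Preemption is disabled
 * so the current CPU's cpuidle device cannot change underneath us.
 * (Sketch of the upstream helper.)
 */
void cpuidle_use_deepest_state(bool enable)
{
	struct cpuidle_device *dev;

	preempt_disable();
	dev = cpuidle_get_device();
	if (dev)
		dev->use_deepest_state = enable;
	preempt_enable();
}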
@ -202,76 +205,65 @@ exit_idle:
|
||||
*
|
||||
* Called with polling cleared.
|
||||
*/
|
||||
static void cpu_idle_loop(void)
|
||||
static void do_idle(void)
|
||||
{
|
||||
int cpu = smp_processor_id();
|
||||
/*
|
||||
* If the arch has a polling bit, we maintain an invariant:
|
||||
*
|
||||
* Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
|
||||
* rq->idle). This means that, if rq->idle has the polling bit set,
|
||||
* then setting need_resched is guaranteed to cause the CPU to
|
||||
* reschedule.
|
||||
*/
|
||||
|
||||
while (1) {
|
||||
/*
|
||||
* If the arch has a polling bit, we maintain an invariant:
|
||||
*
|
||||
* Our polling bit is clear if we're not scheduled (i.e. if
|
||||
* rq->curr != rq->idle). This means that, if rq->idle has
|
||||
* the polling bit set, then setting need_resched is
|
||||
* guaranteed to cause the cpu to reschedule.
|
||||
*/
|
||||
__current_set_polling();
|
||||
tick_nohz_idle_enter();
|
||||
|
||||
__current_set_polling();
|
||||
quiet_vmstat();
|
||||
tick_nohz_idle_enter();
|
||||
while (!need_resched()) {
|
||||
check_pgt_cache();
|
||||
rmb();
|
||||
|
||||
while (!need_resched()) {
|
||||
check_pgt_cache();
|
||||
rmb();
|
||||
|
||||
if (cpu_is_offline(cpu)) {
|
||||
cpuhp_report_idle_dead();
|
||||
arch_cpu_idle_dead();
|
||||
}
|
||||
|
||||
local_irq_disable();
|
||||
arch_cpu_idle_enter();
|
||||
|
||||
/*
|
||||
* In poll mode we reenable interrupts and spin.
|
||||
*
|
||||
* Also if we detected in the wakeup from idle
|
||||
* path that the tick broadcast device expired
|
||||
* for us, we don't want to go deep idle as we
|
||||
* know that the IPI is going to arrive right
|
||||
* away
|
||||
*/
|
||||
if (cpu_idle_force_poll || tick_check_broadcast_expired())
|
||||
cpu_idle_poll();
|
||||
else
|
||||
cpuidle_idle_call();
|
||||
|
||||
arch_cpu_idle_exit();
|
||||
if (cpu_is_offline(smp_processor_id())) {
|
||||
cpuhp_report_idle_dead();
|
||||
arch_cpu_idle_dead();
|
||||
}
|
||||
|
||||
/*
|
||||
* Since we fell out of the loop above, we know
|
||||
* TIF_NEED_RESCHED must be set, propagate it into
|
||||
* PREEMPT_NEED_RESCHED.
|
||||
*
|
||||
* This is required because for polling idle loops we will
|
||||
* not have had an IPI to fold the state for us.
|
||||
*/
|
||||
preempt_set_need_resched();
|
||||
tick_nohz_idle_exit();
|
||||
__current_clr_polling();
|
||||
local_irq_disable();
|
||||
arch_cpu_idle_enter();
|
||||
|
||||
/*
|
||||
* We promise to call sched_ttwu_pending and reschedule
|
||||
* if need_resched is set while polling is set. That
|
||||
* means that clearing polling needs to be visible
|
||||
* before doing these things.
|
||||
* In poll mode we reenable interrupts and spin. Also if we
|
||||
* detected in the wakeup from idle path that the tick
|
||||
* broadcast device expired for us, we don't want to go deep
|
||||
* idle as we know that the IPI is going to arrive right away.
|
||||
*/
|
||||
smp_mb__after_atomic();
|
||||
|
||||
sched_ttwu_pending();
|
||||
schedule_preempt_disabled();
|
||||
if (cpu_idle_force_poll || tick_check_broadcast_expired())
|
||||
cpu_idle_poll();
|
||||
else
|
||||
cpuidle_idle_call();
|
||||
arch_cpu_idle_exit();
|
||||
}
|
||||
|
||||
/*
|
||||
* Since we fell out of the loop above, we know TIF_NEED_RESCHED must
|
||||
* be set, propagate it into PREEMPT_NEED_RESCHED.
|
||||
*
|
||||
* This is required because for polling idle loops we will not have had
|
||||
* an IPI to fold the state for us.
|
||||
*/
|
||||
preempt_set_need_resched();
|
||||
tick_nohz_idle_exit();
|
||||
__current_clr_polling();
|
||||
|
||||
/*
|
||||
* We promise to call sched_ttwu_pending() and reschedule if
|
||||
* need_resched() is set while polling is set. That means that clearing
|
||||
* polling needs to be visible before doing these things.
|
||||
*/
|
||||
smp_mb__after_atomic();
|
||||
|
||||
sched_ttwu_pending();
|
||||
schedule_preempt_disabled();
|
||||
}
|
||||
|
||||
bool cpu_in_idle(unsigned long pc)
|
||||
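The big rewrite above is mostly a refactoring: the body of the old cpu_idle_loop()'s while (1) becomes do_idle(), a single pass through the idle machinery (set polling, enter nohz, idle until need_resched(), exit nohz, reschedule). cpu_startup_entry() regains its infinite loop at the call site (last hunk below), and play_idle() can borrow exactly one such pass per timer window without duplicating any of the tick or polling bookkeeping.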
@@ -280,6 +272,56 @@ bool cpu_in_idle(unsigned long pc)
 	       pc < (unsigned long)__cpuidle_text_end;
 }
 
+struct idle_timer {
+	struct hrtimer timer;
+	int done;
+};
+
+static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
+{
+	struct idle_timer *it = container_of(timer, struct idle_timer, timer);
+
+	WRITE_ONCE(it->done, 1);
+	set_tsk_need_resched(current);
+
+	return HRTIMER_NORESTART;
+}
+
+void play_idle(unsigned long duration_ms)
+{
+	struct idle_timer it;
+
+	/*
+	 * Only FIFO tasks can disable the tick since they don't need the forced
+	 * preemption.
+	 */
+	WARN_ON_ONCE(current->policy != SCHED_FIFO);
+	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
+	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
+	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
+	WARN_ON_ONCE(!duration_ms);
+
+	rcu_sleep_check();
+	preempt_disable();
+	current->flags |= PF_IDLE;
+	cpuidle_use_deepest_state(true);
+
+	it.done = 0;
+	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	it.timer.function = idle_inject_timer_fn;
+	hrtimer_start(&it.timer, ms_to_ktime(duration_ms), HRTIMER_MODE_REL_PINNED);
+
+	while (!READ_ONCE(it.done))
+		do_idle();
+
+	cpuidle_use_deepest_state(false);
+	current->flags &= ~PF_IDLE;
+
+	preempt_fold_need_resched();
+	preempt_enable();
+}
+EXPORT_SYMBOL_GPL(play_idle);
+
 void cpu_startup_entry(enum cpuhp_state state)
 {
 	/*
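play_idle() is meant to be called from a kthread that satisfies the WARN_ON_ONCE() preconditions above: SCHED_FIFO, pinned to a single CPU, with PF_KTHREAD and PF_NO_SETAFFINITY set. A hypothetical caller in the style of the converted intel_powerclamp driver; the names inject_idle_fn and IDLE_WINDOW_MS are illustrative, not taken from this diff:

#include <linux/kthread.h>
#include <linux/sched.h>

#define IDLE_WINDOW_MS	6	/* illustrative duty-cycle window */

/*
 * Hypothetical per-CPU idle-injection work item, sketched after what
 * the converted thermal/intel_powerclamp code does with the kthread
 * worker API: each invocation forces one window of idle on its CPU.
 */
static void inject_idle_fn(struct kthread_work *work)
{
	/*
	 * Runs the real idle loop (do_idle()) until the hrtimer that
	 * play_idle() arms for IDLE_WINDOW_MS fires and sets
	 * need_resched() on this task.
	 */
	play_idle(IDLE_WINDOW_MS);

	/* Requeue or rearm for the next period here (not shown). */
}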
@@ -299,5 +341,6 @@ void cpu_startup_entry(enum cpuhp_state state)
 #endif
 	arch_cpu_idle_prepare();
 	cpuhp_online_idle(state);
-	cpu_idle_loop();
+	while (1)
+		do_idle();
 }