[ARM] idle: clean up pm_idle calling, obey hlt_counter
pm_idle is used by infrastructure (eg, cpuidle) which expects architectures to call it in a certain way. Arrange for ARM to follow x86's lead on this and call pm_idle() with interrupts already disabled. However, we expect pm_idle() to enable interrupts before it returns.

Also, OMAP wants to be able to disable hlt-ing, so allow hlt_counter to prevent all calls to pm_idle.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 915166d96f
commit 9ccdac3662
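To make the new calling convention concrete, here is a minimal sketch (not part of the commit) of what a machine-specific pm_idle implementation is expected to look like after this change. The name my_mach_idle is purely illustrative; the body simply mirrors the new default_idle() in the diff below.

/*
 * Illustrative machine-specific idle hook under the new convention:
 * cpu_idle() calls pm_idle() with IRQs disabled and hlt_counter == 0,
 * and expects IRQs to be enabled again on return (it WARNs otherwise).
 */
static void my_mach_idle(void)          /* hypothetical name */
{
        if (!need_resched())
                arch_idle();            /* enter the CPU's low-power state */
        local_irq_enable();             /* contract: return with IRQs on */
}

/* A board or SoC init path would then install it, e.g.: */
/*      pm_idle = my_mach_idle;         */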
@@ -114,9 +114,6 @@ void arm_machine_restart(char mode, const char *cmd)
 /*
  * Function pointers to optional machine specific functions
  */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
@@ -130,20 +127,19 @@ EXPORT_SYMBOL_GPL(arm_pm_restart);
  */
 static void default_idle(void)
 {
-        if (hlt_counter)
-                cpu_relax();
-        else {
-                local_irq_disable();
-                if (!need_resched())
-                        arch_idle();
-                local_irq_enable();
-        }
+        if (!need_resched())
+                arch_idle();
+        local_irq_enable();
 }
 
+void (*pm_idle)(void) = default_idle;
+EXPORT_SYMBOL(pm_idle);
+
 /*
- * The idle thread. We try to conserve power, while trying to keep
- * overall latency low. The architecture specific idle is passed
- * a value to indicate the level of "idleness" of the system.
+ * The idle thread, has rather strange semantics for calling pm_idle,
+ * but this is what x86 does and we need to do the same, so that
+ * things like cpuidle get called in the same way. The only difference
+ * is that we always respect 'hlt_counter' to prevent low power idle.
  */
 void cpu_idle(void)
 {
@@ -151,21 +147,31 @@ void cpu_idle(void)
 
         /* endless idle loop with no priority at all */
         while (1) {
-                void (*idle)(void) = pm_idle;
-
+                tick_nohz_stop_sched_tick(1);
+                leds_event(led_idle_start);
+                while (!need_resched()) {
 #ifdef CONFIG_HOTPLUG_CPU
-                if (cpu_is_offline(smp_processor_id())) {
-                        leds_event(led_idle_start);
-                        cpu_die();
-                }
+                        if (cpu_is_offline(smp_processor_id()))
+                                cpu_die();
 #endif
 
-                if (!idle)
-                        idle = default_idle;
-                leds_event(led_idle_start);
-                tick_nohz_stop_sched_tick(1);
-                while (!need_resched())
-                        idle();
+                        local_irq_disable();
+                        if (hlt_counter) {
+                                local_irq_enable();
+                                cpu_relax();
+                        } else {
+                                stop_critical_timings();
+                                pm_idle();
+                                start_critical_timings();
+                                /*
+                                 * This will eventually be removed - pm_idle
+                                 * functions should always return with IRQs
+                                 * enabled.
+                                 */
+                                WARN_ON(irqs_disabled());
+                                local_irq_enable();
+                        }
+                }
                 leds_event(led_idle_end);
                 tick_nohz_restart_sched_tick();
                 preempt_enable_no_resched();
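The hlt_counter check added to cpu_idle() in the last hunk above is what lets platforms such as OMAP veto low-power idle entirely. A rough sketch of how driver or PM code might use it, assuming the ARM disable_hlt()/enable_hlt() helpers that increment and decrement hlt_counter; the prototypes are repeated here only to keep the sketch self-contained, and my_unsafe_window() is a hypothetical caller.

extern void disable_hlt(void);          /* bumps hlt_counter */
extern void enable_hlt(void);           /* drops hlt_counter */

static void my_unsafe_window(void)      /* hypothetical caller */
{
        disable_hlt();                  /* idle loop now spins with cpu_relax()
                                           instead of calling pm_idle() */

        /* ... work during which entering low-power idle is unsafe ... */

        enable_hlt();                   /* pm_idle()-based idle resumes */
}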