/*
* Generic entry point for the idle threads
*/
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

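/*
 * Illustrative sketch only, not part of the original file: a caller with
 * a latency-critical window could use the refcounted poll control above
 * to keep idle CPUs spinning in cpu_idle_poll() instead of entering
 * deeper idle states. The helper name below is hypothetical.
 */
#if 0	/* example, not built */
static void example_latency_critical_window(void)
{
	cpu_idle_poll_ctrl(true);	/* bump cpu_idle_force_poll */

	/* ... work that cannot tolerate deep-idle wakeup latency ... */

	cpu_idle_poll_ctrl(false);	/* drop the reference again */
}
#endif
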
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

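/*
 * Usage note (added comment, not in the original file): with
 * CONFIG_GENERIC_IDLE_POLL_SETUP enabled, booting with "nohlt" on the
 * kernel command line sets cpu_idle_force_poll so the idle loop always
 * polls, while "hlt" clears it again, matching the two __setup() hooks
 * above.
 */
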
static inline int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!tif_need_resched())
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}

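/*
 * Note (added comment): cpu_idle_poll() mirrors the bookkeeping of a
 * real idle state - RCU is told the CPU is idle and the power
 * tracepoints fire - but the "state" is just cpu_relax() with
 * interrupts enabled until TIF_NEED_RESCHED is seen.
 */
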
/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}

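/*
 * Illustrative sketch only: an architecture overrides the weak
 * arch_cpu_idle() above with its own low-power wait (x86, for instance,
 * typically ends up in safe_halt()). The body below is hypothetical;
 * whatever the arch does, it should return with local interrupts
 * enabled, as cpuidle_idle_call() warns if they are still disabled
 * afterwards.
 */
#if 0	/* example, not built */
void arch_cpu_idle(void)
{
	/* hypothetical arch helper: sleep until the next interrupt, irqs on */
	arch_wait_for_interrupt_irqs_enabled();
}
#endif
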
/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
static int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state, ret;
	bool broadcast;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq and
	 * set again the polling flag
	 */
	if (current_clr_polling_and_test()) {
		local_irq_enable();
		__current_set_polling();
		return 0;
	}

	/*
	 * During the idle period, stop measuring the disabled irqs
	 * critical sections latencies
	 */
	stop_critical_timings();

	/*
	 * Tell the RCU framework we are entering an idle section,
	 * so no more rcu read side critical sections and one more
	 * step to the grace period
	 */
	rcu_idle_enter();

	/*
	 * Check if the cpuidle framework is ready, otherwise fallback
	 * to the default arch specific idle method
	 */
	ret = cpuidle_enabled(drv, dev);

	if (!ret) {
		/*
		 * Ask the governor to choose an idle state it thinks
		 * it is convenient to go to. There is *always* a
		 * convenient idle state
		 */
		next_state = cpuidle_select(drv, dev);

		/*
		 * The idle task must be scheduled, it is pointless to
		 * go to idle, just update no idle residency and get
		 * out of this function
		 */
		if (current_clr_polling_and_test()) {
			dev->last_residency = 0;
			entered_state = next_state;
			local_irq_enable();
		} else {
			broadcast = !!(drv->states[next_state].flags &
				       CPUIDLE_FLAG_TIMER_STOP);

			if (broadcast)
				/*
				 * Tell the time framework to switch
				 * to a broadcast timer because our
				 * local timer will be shutdown. If a
				 * local timer is used from another
				 * cpu as a broadcast timer, this call
				 * may fail if it is not available
				 */
				ret = clockevents_notify(
					CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
					&dev->cpu);

			if (!ret) {
				trace_cpu_idle_rcuidle(next_state, dev->cpu);

				/*
				 * Enter the idle state previously
				 * returned by the governor decision.
				 * This function will block until an
				 * interrupt occurs and will take care
				 * of re-enabling the local interrupts
				 */
				entered_state = cpuidle_enter(drv, dev,
							      next_state);

				trace_cpu_idle_rcuidle(PWR_EVENT_EXIT,
						       dev->cpu);

				if (broadcast)
					clockevents_notify(
						CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
						&dev->cpu);

				/*
				 * Give the governor an opportunity to
				 * reflect on the outcome
				 */
				cpuidle_reflect(dev, entered_state);
			}
		}
	}

	/*
	 * We can't use the cpuidle framework, let's use the default
	 * idle routine
	 */
	if (ret)
		arch_cpu_idle();

	__current_set_polling();

	/*
	 * It is up to the idle functions to enable back the local
	 * interrupt
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
	start_critical_timings();

	return 0;
}

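/*
 * Flow summary (added comment): cpuidle_enabled() gates the framework,
 * cpuidle_select() asks the governor for a state index, cpuidle_enter()
 * actually enters that state (with an optional broadcast-timer
 * hand-over around it), and cpuidle_reflect() feeds the result back to
 * the governor. A failure along the way falls back to arch_cpu_idle().
 */
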
/*
* Generic idle loop implementation
*/
static void cpu_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set, propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non boot cpus!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	__current_set_polling();
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}
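
/*
 * Illustrative sketch only: architecture bring-up code is expected to
 * end in cpu_startup_entry(), which never returns. The function name
 * below is hypothetical; CPUHP_ONLINE is the state generic code passes
 * once a CPU is fully up.
 */
#if 0	/* example, not built */
static void example_secondary_cpu_entry(void)
{
	/* ... arch specific setup of the freshly booted CPU ... */
	cpu_startup_entry(CPUHP_ONLINE);	/* enters the idle loop, does not return */
}
#endif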