/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>
#include <linux/suspend.h>

#include <asm/tlb.h>
#include <trace/events/power.h>

#include "sched.h"

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

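/*
 * cpu_idle_poll_ctrl - force or release the polling idle loop.
 * @enable: true to force polling, false to release it.
 *
 * The force count nests, so callers are expected to pair the calls,
 * roughly like this (illustrative sketch, not taken from an in-tree
 * caller):
 *
 *	cpu_idle_poll_ctrl(true);
 *	... section during which idle CPUs must stay in the polling loop ...
 *	cpu_idle_poll_ctrl(false);
 */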
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}
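
/*
 * Boot-time control: with CONFIG_GENERIC_IDLE_POLL_SETUP, passing "nohlt"
 * on the kernel command line forces the polling loop, while "hlt" clears
 * the forcing again (see the __setup() handlers below).
 */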
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif
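
/*
 * Polling idle: keep the CPU out of low-power states and spin on
 * cpu_relax() with interrupts enabled until a reschedule is needed or
 * the forced-poll/broadcast-expiry condition goes away.  RCU is still
 * told about the idle window via rcu_idle_enter()/rcu_idle_exit().
 */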
static inline int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	stop_critical_timings();

	while (!tif_need_resched() &&
		(cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();

	start_critical_timings();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();

	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
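
/*
 * Illustrative sketch only, not any particular architecture: a real
 * override of arch_cpu_idle() typically executes the CPU's halt or
 * wait-for-interrupt primitive (wait_for_interrupt() below is a
 * hypothetical stand-in) and returns with interrupts enabled:
 *
 *	void arch_cpu_idle(void)
 *	{
 *		wait_for_interrupt();
 *		local_irq_enable();
 *	}
 *
 * The weak fallback below cannot halt the CPU, so it just forces the
 * polling loop and re-enables interrupts.
 */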
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void default_idle_call(void)
{
	if (current_clr_polling_and_test()) {
		local_irq_enable();
	} else {
		stop_critical_timings();
		arch_cpu_idle();
		start_critical_timings();
	}
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task must be scheduled, it is pointless to go to idle, just
	 * update no idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, is called with polling
 * set, and it returns with polling set.  If it ever stops polling, it
 * must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * Tell the RCU framework we are entering an idle section,
	 * so no more rcu read side critical sections and one more
	 * step to the grace period.
	 */
	rcu_idle_enter();

	if (cpuidle_not_available(drv, dev)) {
		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("freeze") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any).  In that case
	 * bypass the cpuidle governor and go straight for the deepest idle
	 * state available.  Possibly also suspend the local tick and the
	 * entire timekeeping to prevent timer interrupts from kicking us out
	 * of idle until a proper wakeup interrupt happens.
	 */
	if (idle_should_freeze()) {
		entered_state = cpuidle_enter_freeze(drv, dev);
		if (entered_state > 0) {
			local_irq_enable();
			goto exit_idle;
		}

		next_state = cpuidle_find_deepest_state(drv, dev);
		call_cpuidle(drv, dev, next_state);
	} else {
		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev);
		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome.
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
}

DEFINE_PER_CPU(bool, cpu_dead_idle);

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		/*
		 * If the arch has a polling bit, we maintain an invariant:
		 *
		 * Our polling bit is clear if we're not scheduled (i.e. if
		 * rq->curr != rq->idle). This means that, if rq->idle has
		 * the polling bit set, then setting need_resched is
		 * guaranteed to cause the cpu to reschedule.
		 */
		__current_set_polling();
		quiet_vmstat();
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(smp_processor_id())) {
				rcu_cpu_notify(NULL, CPU_DYING_IDLE,
					       (void *)(long)smp_processor_id());
				smp_mb(); /* all activity before dead. */
				this_cpu_write(cpu_dead_idle, true);
				arch_cpu_idle_dead();
			}

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we re-enable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away.
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set, propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
		tick_nohz_idle_exit();
		__current_clr_polling();

		/*
		 * We promise to call sched_ttwu_pending and reschedule
		 * if need_resched is set while polling is set. That
		 * means that clearing polling needs to be visible
		 * before doing these things.
		 */
		smp_mb__after_atomic();

		sched_ttwu_pending();
		schedule_preempt_disabled();
	}
}
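
/*
 * Simplified sketch (see ttwu_queue_remote() in kernel/sched/core.c) of
 * the remote-wakeup side that the polling promise above cooperates with:
 *
 *	if (!set_nr_if_polling(rq->idle))
 *		smp_send_reschedule(cpu);
 *
 * When the idle task advertises TIF_POLLING_NRFLAG, setting
 * TIF_NEED_RESCHED is enough and the IPI is skipped; the loop above
 * guarantees it will notice the flag and call sched_ttwu_pending().
 */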

void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non boot cpus!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}