/*
 * The idle loop for all SuperH platforms.
 *
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
# include <linux/module.h>
# include <linux/init.h>
# include <linux/mm.h>
# include <linux/pm.h>
# include <linux/tick.h>
# include <linux/preempt.h>
# include <linux/thread_info.h>
# include <linux/irqflags.h>
2009-06-19 14:40:51 +09:00
# include <linux/smp.h>
2011-04-01 19:34:59 -04:00
# include <linux/cpuidle.h>
2008-11-26 15:52:44 +09:00
# include <asm/pgalloc.h>
# include <asm/system.h>
2011-07-26 16:09:06 -07:00
# include <linux/atomic.h>
2010-04-26 19:08:55 +09:00
# include <asm/smp.h>
2008-11-26 15:52:44 +09:00
2011-08-08 16:30:11 +09:00
/* Active idle routine; installed by select_idle_routine() unless a platform set it first. */
void ( * pm_idle ) ( void ) ;
2010-01-20 16:42:52 +09:00
static int hlt_counter ;
2008-11-26 15:52:44 +09:00
/*
 * "nohlt" boot parameter: raise hlt_counter so the idle loop never
 * issues cpu_sleep() and polls instead.
 */
static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}
__setup("nohlt", nohlt_setup);
/*
 * "hlt" boot parameter: clear hlt_counter so the idle loop is again
 * allowed to use cpu_sleep().
 */
static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}
__setup("hlt", hlt_setup);
2009-10-16 17:20:58 +09:00
/* True unless sleeping in idle has been disabled via "nohlt". */
static inline int hlt_works(void)
{
	return hlt_counter == 0;
}
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive.  Use this option with caution.
 */
static void poll_idle(void)
{
	local_irq_enable();

	/* Spin with interrupts on until a reschedule is pending. */
	for (;;) {
		if (need_resched())
			break;
		cpu_relax();
	}
}
2009-04-02 13:08:31 +09:00
void default_idle ( void )
2008-11-26 15:52:44 +09:00
{
2009-10-16 17:20:58 +09:00
if ( hlt_works ( ) ) {
2008-11-26 15:52:44 +09:00
clear_thread_flag ( TIF_POLLING_NRFLAG ) ;
smp_mb__after_clear_bit ( ) ;
2009-12-18 14:40:56 +09:00
set_bl_bit ( ) ;
2009-10-16 17:20:58 +09:00
if ( ! need_resched ( ) ) {
local_irq_enable ( ) ;
2008-11-26 15:52:44 +09:00
cpu_sleep ( ) ;
2009-10-16 17:55:59 +09:00
} else
local_irq_enable ( ) ;
2008-11-26 15:52:44 +09:00
set_thread_flag ( TIF_POLLING_NRFLAG ) ;
2009-12-18 14:40:56 +09:00
clear_bl_bit ( ) ;
2008-11-26 15:52:44 +09:00
} else
2009-10-16 17:20:58 +09:00
poll_idle ( ) ;
2008-11-26 15:52:44 +09:00
}
2009-10-16 17:20:58 +09:00
/*
* The idle thread . There ' s no useful work to be done , so just try to conserve
* power and have a low exit latency ( ie sit in a loop waiting for somebody to
* say that they ' d like to reschedule )
*/
2008-11-26 15:52:44 +09:00
void cpu_idle ( void )
{
2009-10-16 17:20:58 +09:00
unsigned int cpu = smp_processor_id ( ) ;
2008-11-26 15:52:44 +09:00
set_thread_flag ( TIF_POLLING_NRFLAG ) ;
/* endless idle loop with no priority at all */
while ( 1 ) {
2011-11-17 18:48:14 +01:00
tick_nohz_idle_enter ( ) ;
rcu_idle_enter ( ) ;
2008-11-26 15:52:44 +09:00
2010-04-26 19:08:55 +09:00
while ( ! need_resched ( ) ) {
2009-10-16 17:27:58 +09:00
check_pgt_cache ( ) ;
rmb ( ) ;
2010-04-26 19:08:55 +09:00
if ( cpu_is_offline ( cpu ) )
play_dead ( ) ;
2009-10-16 17:20:58 +09:00
local_irq_disable ( ) ;
/* Don't trace irqs off for idle */
stop_critical_timings ( ) ;
2011-08-04 09:24:31 -07:00
if ( cpuidle_idle_call ( ) )
2011-04-01 19:34:59 -04:00
pm_idle ( ) ;
2009-10-16 17:20:58 +09:00
/*
* Sanity check to ensure that pm_idle ( ) returns
* with IRQs enabled
*/
WARN_ON ( irqs_disabled ( ) ) ;
start_critical_timings ( ) ;
}
2008-11-26 15:52:44 +09:00
2011-11-17 18:48:14 +01:00
rcu_idle_exit ( ) ;
tick_nohz_idle_exit ( ) ;
2008-11-26 15:52:44 +09:00
preempt_enable_no_resched ( ) ;
schedule ( ) ;
preempt_disable ( ) ;
}
}
2009-06-19 14:40:51 +09:00
2010-03-23 17:06:47 +09:00
/*
 * Pick the boot-time idle implementation.  A platform-installed
 * pm_idle is left untouched; otherwise choose between sleeping and
 * polling based on the "hlt"/"nohlt" parameters.
 */
void __init select_idle_routine(void)
{
	if (pm_idle)
		return;

	pm_idle = hlt_works() ? default_idle : poll_idle;
}
2009-06-19 14:40:51 +09:00
/* No-op IPI callback; exists only so cpu_idle_wait() can kick other CPUs. */
static void do_nothing(void *unused)
{
	/* Intentionally empty. */
}
2010-01-20 16:42:52 +09:00
void stop_this_cpu ( void * unused )
{
local_irq_disable ( ) ;
2010-04-26 18:39:50 +09:00
set_cpu_online ( smp_processor_id ( ) , false ) ;
2010-01-20 16:42:52 +09:00
for ( ; ; )
cpu_sleep ( ) ;
}
2009-06-19 14:40:51 +09:00
/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value.  Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call.  Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
	/* Publish the caller's new pm_idle value to all CPUs. */
	smp_mb();

	/* Kick every other CPU out of pm_idle so each re-reads it. */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);