2005-04-17 02:20:36 +04:00
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
# include <linux/proc_fs.h>
# include <linux/smp.h>
# include <linux/init.h>
# include <linux/notifier.h>
# include <linux/sched.h>
# include <linux/unistd.h>
# include <linux/cpu.h>
# include <linux/module.h>
# include <linux/kthread.h>
# include <linux/stop_machine.h>
2006-06-26 11:24:32 +04:00
# include <linux/mutex.h>
2005-04-17 02:20:36 +04:00
/* This protects CPUs going up and down... */
2006-07-23 23:12:16 +04:00
static DEFINE_MUTEX ( cpu_add_remove_lock ) ;
static DEFINE_MUTEX ( cpu_bitmask_lock ) ;
2005-04-17 02:20:36 +04:00
2006-10-17 11:10:35 +04:00
static __cpuinitdata RAW_NOTIFIER_HEAD ( cpu_chain ) ;
2005-04-17 02:20:36 +04:00
2006-09-26 10:32:48 +04:00
/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
* Should always be manipulated under cpu_add_remove_lock
*/
static int cpu_hotplug_disabled ;
2005-11-29 00:43:46 +03:00
# ifdef CONFIG_HOTPLUG_CPU
2005-11-09 08:34:24 +03:00
2006-07-23 23:12:16 +04:00
/* Recursion guard for lock_cpu_hotplug(): some callers (cpufreq) re-enter
 * the lock on the same task, so remember the owning task and a nesting
 * depth instead of self-deadlocking on cpu_bitmask_lock. */
static struct task_struct * recursive ;
static int recursive_depth ;
2005-11-09 08:34:24 +03:00
2005-11-29 00:43:46 +03:00
void lock_cpu_hotplug ( void )
{
2006-07-23 23:12:16 +04:00
struct task_struct * tsk = current ;
if ( tsk = = recursive ) {
static int warnings = 10 ;
if ( warnings ) {
printk ( KERN_ERR " Lukewarm IQ detected in hotplug locking \n " ) ;
WARN_ON ( 1 ) ;
warnings - - ;
}
recursive_depth + + ;
return ;
}
mutex_lock ( & cpu_bitmask_lock ) ;
recursive = tsk ;
2005-11-29 00:43:46 +03:00
}
EXPORT_SYMBOL_GPL ( lock_cpu_hotplug ) ;
2005-11-09 08:34:24 +03:00
2005-11-29 00:43:46 +03:00
void unlock_cpu_hotplug ( void )
{
2006-07-23 23:12:16 +04:00
WARN_ON ( recursive ! = current ) ;
if ( recursive_depth ) {
recursive_depth - - ;
return ;
2005-11-29 00:43:46 +03:00
}
2006-07-23 23:12:16 +04:00
recursive = NULL ;
2006-11-06 10:52:04 +03:00
mutex_unlock ( & cpu_bitmask_lock ) ;
2005-11-29 00:43:46 +03:00
}
EXPORT_SYMBOL_GPL ( unlock_cpu_hotplug ) ;
# endif /* CONFIG_HOTPLUG_CPU */
2005-11-09 08:34:24 +03:00
2005-04-17 02:20:36 +04:00
/* Need to know about CPUs going up/down? */
2006-06-27 13:54:08 +04:00
int __cpuinit register_cpu_notifier ( struct notifier_block * nb )
2005-04-17 02:20:36 +04:00
{
2006-10-17 11:10:35 +04:00
int ret ;
mutex_lock ( & cpu_add_remove_lock ) ;
ret = raw_notifier_chain_register ( & cpu_chain , nb ) ;
mutex_unlock ( & cpu_add_remove_lock ) ;
return ret ;
2005-04-17 02:20:36 +04:00
}
2006-06-27 13:54:08 +04:00
# ifdef CONFIG_HOTPLUG_CPU
2005-04-17 02:20:36 +04:00
EXPORT_SYMBOL(register_cpu_notifier);

/* Remove a previously registered hotplug notifier from cpu_chain. */
void unregister_cpu_notifier(struct notifier_block *nb)
{
	mutex_lock(&cpu_add_remove_lock);
	raw_notifier_chain_unregister(&cpu_chain, nb);
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(unregister_cpu_notifier);
/*
 * Warn about any task that has accrued CPU time yet is still assigned
 * to @cpu after it went down.  Walks the whole process list under
 * tasklist_lock.
 */
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			/* Use string concatenation, not a backslash
			 * continuation, so no indentation whitespace
			 * leaks into the message. */
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
			       "(state = %ld, flags = %x)\n",
			       p->comm, task_pid_nr(p), cpu,
			       p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}
2007-05-24 13:23:10 +04:00
/* Arguments handed to take_cpu_down() through __stop_machine_run(). */
struct take_cpu_down_param {
	unsigned long mod;	/* CPU_TASKS_FROZEN or 0 */
	void *hcpu;		/* cpu number cast to void * */
};
2005-04-17 02:20:36 +04:00
/* Take this CPU down. */
2007-05-24 13:23:10 +04:00
static int take_cpu_down ( void * _param )
2005-04-17 02:20:36 +04:00
{
2007-05-24 13:23:10 +04:00
struct take_cpu_down_param * param = _param ;
2005-04-17 02:20:36 +04:00
int err ;
2007-05-24 13:23:10 +04:00
raw_notifier_call_chain ( & cpu_chain , CPU_DYING | param - > mod ,
param - > hcpu ) ;
2005-04-17 02:20:36 +04:00
/* Ensure this CPU doesn't handle any more interrupts. */
err = __cpu_disable ( ) ;
if ( err < 0 )
2005-06-26 01:54:50 +04:00
return err ;
2005-04-17 02:20:36 +04:00
2005-06-26 01:54:50 +04:00
/* Force idle task to run as soon as we yield: it should
immediately notice cpu is offline and die quickly . */
sched_idle_next ( ) ;
return 0 ;
2005-04-17 02:20:36 +04:00
}
2006-09-26 10:32:48 +04:00
/* Requires cpu_add_remove_lock to be held */
2007-05-09 13:35:10 +04:00
static int _cpu_down ( unsigned int cpu , int tasks_frozen )
2005-04-17 02:20:36 +04:00
{
2007-05-09 13:34:04 +04:00
int err , nr_calls = 0 ;
2005-04-17 02:20:36 +04:00
struct task_struct * p ;
cpumask_t old_allowed , tmp ;
2007-05-09 13:34:04 +04:00
void * hcpu = ( void * ) ( long ) cpu ;
2007-05-09 13:35:10 +04:00
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0 ;
2007-05-24 13:23:10 +04:00
struct take_cpu_down_param tcd_param = {
. mod = mod ,
. hcpu = hcpu ,
} ;
2005-04-17 02:20:36 +04:00
2006-09-26 10:32:48 +04:00
if ( num_online_cpus ( ) = = 1 )
return - EBUSY ;
2005-04-17 02:20:36 +04:00
2006-09-26 10:32:48 +04:00
if ( ! cpu_online ( cpu ) )
return - EINVAL ;
2005-04-17 02:20:36 +04:00
2007-05-09 13:34:04 +04:00
raw_notifier_call_chain ( & cpu_chain , CPU_LOCK_ACQUIRE , hcpu ) ;
2007-05-09 13:35:10 +04:00
err = __raw_notifier_call_chain ( & cpu_chain , CPU_DOWN_PREPARE | mod ,
2007-05-09 13:34:04 +04:00
hcpu , - 1 , & nr_calls ) ;
2005-04-17 02:20:36 +04:00
if ( err = = NOTIFY_BAD ) {
2007-10-18 14:05:12 +04:00
nr_calls - - ;
2007-05-09 13:35:10 +04:00
__raw_notifier_call_chain ( & cpu_chain , CPU_DOWN_FAILED | mod ,
hcpu , nr_calls , NULL ) ;
2005-04-17 02:20:36 +04:00
printk ( " %s: attempt to take down CPU %u failed \n " ,
__FUNCTION__ , cpu ) ;
2007-05-09 13:34:03 +04:00
err = - EINVAL ;
goto out_release ;
2005-04-17 02:20:36 +04:00
}
/* Ensure that we are not runnable on dying cpu */
old_allowed = current - > cpus_allowed ;
tmp = CPU_MASK_ALL ;
cpu_clear ( cpu , tmp ) ;
set_cpus_allowed ( current , tmp ) ;
2006-07-23 23:12:16 +04:00
mutex_lock ( & cpu_bitmask_lock ) ;
2007-05-24 13:23:10 +04:00
p = __stop_machine_run ( take_cpu_down , & tcd_param , cpu ) ;
2006-07-23 23:12:16 +04:00
mutex_unlock ( & cpu_bitmask_lock ) ;
2006-10-28 21:38:57 +04:00
if ( IS_ERR ( p ) | | cpu_online ( cpu ) ) {
2005-04-17 02:20:36 +04:00
/* CPU didn't die: tell everyone. Can't complain. */
2007-05-09 13:35:10 +04:00
if ( raw_notifier_call_chain ( & cpu_chain , CPU_DOWN_FAILED | mod ,
2007-05-09 13:34:04 +04:00
hcpu ) = = NOTIFY_BAD )
2005-04-17 02:20:36 +04:00
BUG ( ) ;
2006-10-28 21:38:57 +04:00
if ( IS_ERR ( p ) ) {
err = PTR_ERR ( p ) ;
goto out_allowed ;
}
2005-04-17 02:20:36 +04:00
goto out_thread ;
2006-10-28 21:38:57 +04:00
}
2005-04-17 02:20:36 +04:00
/* Wait for it to sleep (leaving idle task). */
while ( ! idle_cpu ( cpu ) )
yield ( ) ;
/* This actually kills the CPU. */
__cpu_die ( cpu ) ;
/* CPU is completely dead: tell everyone. Too late to complain. */
2007-05-09 13:35:10 +04:00
if ( raw_notifier_call_chain ( & cpu_chain , CPU_DEAD | mod ,
hcpu ) = = NOTIFY_BAD )
2005-04-17 02:20:36 +04:00
BUG ( ) ;
check_for_tasks ( cpu ) ;
out_thread :
err = kthread_stop ( p ) ;
out_allowed :
set_cpus_allowed ( current , old_allowed ) ;
2007-05-09 13:34:03 +04:00
out_release :
2007-05-09 13:35:10 +04:00
raw_notifier_call_chain ( & cpu_chain , CPU_LOCK_RELEASE , hcpu ) ;
2006-09-26 10:32:48 +04:00
return err ;
}
/* Public entry point: take @cpu offline unless hotplug is disabled. */
int cpu_down(unsigned int cpu)
{
	int ret;

	mutex_lock(&cpu_add_remove_lock);
	ret = cpu_hotplug_disabled ? -EBUSY : _cpu_down(cpu, 0);
	mutex_unlock(&cpu_add_remove_lock);

	return ret;
}
# endif /*CONFIG_HOTPLUG_CPU*/
2006-09-26 10:32:48 +04:00
/* Requires cpu_add_remove_lock to be held */
2007-05-09 13:35:10 +04:00
static int __cpuinit _cpu_up ( unsigned int cpu , int tasks_frozen )
2005-04-17 02:20:36 +04:00
{
2007-05-09 13:34:03 +04:00
int ret , nr_calls = 0 ;
2005-04-17 02:20:36 +04:00
void * hcpu = ( void * ) ( long ) cpu ;
2007-05-09 13:35:10 +04:00
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0 ;
2005-04-17 02:20:36 +04:00
2006-09-26 10:32:48 +04:00
if ( cpu_online ( cpu ) | | ! cpu_present ( cpu ) )
return - EINVAL ;
2005-11-09 08:34:24 +03:00
2007-05-09 13:34:03 +04:00
raw_notifier_call_chain ( & cpu_chain , CPU_LOCK_ACQUIRE , hcpu ) ;
2007-05-09 13:35:10 +04:00
ret = __raw_notifier_call_chain ( & cpu_chain , CPU_UP_PREPARE | mod , hcpu ,
2007-05-09 13:34:03 +04:00
- 1 , & nr_calls ) ;
2005-04-17 02:20:36 +04:00
if ( ret = = NOTIFY_BAD ) {
2007-10-18 14:05:12 +04:00
nr_calls - - ;
2005-04-17 02:20:36 +04:00
printk ( " %s: attempt to bring up CPU %u failed \n " ,
__FUNCTION__ , cpu ) ;
ret = - EINVAL ;
goto out_notify ;
}
/* Arch-specific enabling code. */
2006-07-23 23:12:16 +04:00
mutex_lock ( & cpu_bitmask_lock ) ;
2005-04-17 02:20:36 +04:00
ret = __cpu_up ( cpu ) ;
2006-07-23 23:12:16 +04:00
mutex_unlock ( & cpu_bitmask_lock ) ;
2005-04-17 02:20:36 +04:00
if ( ret ! = 0 )
goto out_notify ;
2006-03-24 20:45:21 +03:00
BUG_ON ( ! cpu_online ( cpu ) ) ;
2005-04-17 02:20:36 +04:00
/* Now call notifier in preparation. */
2007-05-09 13:35:10 +04:00
raw_notifier_call_chain ( & cpu_chain , CPU_ONLINE | mod , hcpu ) ;
2005-04-17 02:20:36 +04:00
out_notify :
if ( ret ! = 0 )
2007-05-09 13:34:03 +04:00
__raw_notifier_call_chain ( & cpu_chain ,
2007-05-09 13:35:10 +04:00
CPU_UP_CANCELED | mod , hcpu , nr_calls , NULL ) ;
2007-05-09 13:34:03 +04:00
raw_notifier_call_chain ( & cpu_chain , CPU_LOCK_RELEASE , hcpu ) ;
2006-09-26 10:32:48 +04:00
return ret ;
}
2007-01-11 10:15:34 +03:00
/* Public entry point: bring @cpu online unless hotplug is disabled. */
int __cpuinit cpu_up(unsigned int cpu)
{
	int ret;

	mutex_lock(&cpu_add_remove_lock);
	ret = cpu_hotplug_disabled ? -EBUSY : _cpu_up(cpu, 0);
	mutex_unlock(&cpu_add_remove_lock);

	return ret;
}
2007-08-31 10:56:29 +04:00
# ifdef CONFIG_PM_SLEEP_SMP
2006-09-26 10:32:48 +04:00
static cpumask_t frozen_cpus ;
int disable_nonboot_cpus ( void )
{
2006-12-23 18:55:29 +03:00
int cpu , first_cpu , error = 0 ;
2006-09-26 10:32:48 +04:00
mutex_lock ( & cpu_add_remove_lock ) ;
2007-04-02 10:49:49 +04:00
first_cpu = first_cpu ( cpu_online_map ) ;
2006-09-26 10:32:48 +04:00
/* We take down all of the non-boot CPUs in one shot to avoid races
* with the userspace trying to use the CPU hotplug at the same time
*/
cpus_clear ( frozen_cpus ) ;
printk ( " Disabling non-boot CPUs ... \n " ) ;
for_each_online_cpu ( cpu ) {
if ( cpu = = first_cpu )
continue ;
2007-05-09 13:35:10 +04:00
error = _cpu_down ( cpu , 1 ) ;
2006-09-26 10:32:48 +04:00
if ( ! error ) {
cpu_set ( cpu , frozen_cpus ) ;
printk ( " CPU%d is down \n " , cpu ) ;
} else {
printk ( KERN_ERR " Error taking CPU%d down: %d \n " ,
cpu , error ) ;
break ;
}
}
if ( ! error ) {
BUG_ON ( num_online_cpus ( ) > 1 ) ;
/* Make sure the CPUs won't be enabled by someone else */
cpu_hotplug_disabled = 1 ;
} else {
2006-12-23 18:55:29 +03:00
printk ( KERN_ERR " Non-boot CPUs are not disabled \n " ) ;
2006-09-26 10:32:48 +04:00
}
2006-07-23 23:12:16 +04:00
mutex_unlock ( & cpu_add_remove_lock ) ;
2006-09-26 10:32:48 +04:00
return error ;
}
/*
 * enable_nonboot_cpus - bring back the CPUs recorded in frozen_cpus
 *
 * Resume-path counterpart of disable_nonboot_cpus(): clears
 * cpu_hotplug_disabled first (so _cpu_up() is permitted again), then
 * brings each frozen CPU online, logging but not aborting on failure.
 */
void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	mutex_lock(&cpu_add_remove_lock);
	cpu_hotplug_disabled = 0;
	if (cpus_empty(frozen_cpus))
		goto out;

	printk("Enabling non-boot CPUs ...\n");
	for_each_cpu_mask(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk("CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}
	cpus_clear(frozen_cpus);
out:
	mutex_unlock(&cpu_add_remove_lock);
}
2007-08-31 10:56:29 +04:00
# endif /* CONFIG_PM_SLEEP_SMP */