/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
/*
 * Represents all CPUs present in the system.
 * In systems capable of hotplug, this map could dynamically grow
 * as new CPUs are detected in the system via any platform-specific
 * method, such as ACPI.
 */
cpumask_t cpu_present_map __read_mostly;
EXPORT_SYMBOL(cpu_present_map);

#ifndef CONFIG_SMP

/*
 * Represents all CPUs that are currently online.
 */
cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_possible_map);
#else /* CONFIG_SMP */

/* Serializes the updates to cpu_online_map, cpu_present_map */
static DEFINE_MUTEX(cpu_add_remove_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

static struct {
	struct task_struct *active_writer;
	struct mutex lock;	/* Synchronizes accesses to refcount. */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug;
void __init cpu_hotplug_init(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_init(&cpu_hotplug.lock);
	cpu_hotplug.refcount = 0;
}
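/*
 * CPUs that remain available for scheduling. cpu_down() below clears a
 * CPU from this map (and runs synchronize_sched()) before actually
 * taking it offline, so nothing is placed on a dying CPU.
 */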
cpumask_t cpu_active_map;
#ifdef CONFIG_HOTPLUG_CPU

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
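
/*
 * Illustrative usage sketch (the caller and do_per_cpu_work() are
 * hypothetical, not part of this file): a reader that needs
 * cpu_online_map to stay stable brackets its walk with the two
 * refcounting calls above:
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_work(cpu);
 *	put_online_cpus();
 *
 * No CPU can be hot-unplugged between the two calls, because
 * cpu_hotplug_begin() sleeps until the refcount drops to zero.
 */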
#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_map and cpu_present_map.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
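
/*
 * Illustrative pairing (assumed caller, not shown in this file): code
 * that marks a hot-added CPU present would bracket the map update:
 *
 *	cpu_maps_update_begin();
 *	cpu_set(cpu, cpu_present_map);
 *	cpu_maps_update_done();
 */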

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is
 * active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice, since
 * get_online_cpus() is not an API which is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
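
/*
 * Illustrative sketch of a hotplug callback (foo_cpu_callback and its
 * helpers are hypothetical, not part of this file): a subsystem that
 * must react to CPUs coming and going registers a notifier_block and
 * switches on the action code:
 *
 *	static int __cpuinit foo_cpu_callback(struct notifier_block *nfb,
 *					      unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action) {
 *		case CPU_UP_PREPARE:
 *			return foo_alloc_for_cpu(cpu) ? NOTIFY_BAD : NOTIFY_OK;
 *		case CPU_DEAD:
 *			foo_free_for_cpu(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_notifier __cpuinitdata = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&foo_cpu_notifier);
 *
 * Returning NOTIFY_BAD from CPU_UP_PREPARE aborts the bring-up and
 * triggers the CPU_UP_CANCELED rollback in _cpu_up() below.
 */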

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}
struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};
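
/*
 * take_cpu_down() is run via __stop_machine_run() (see _cpu_down()
 * below), i.e. on the dying CPU itself while all other CPUs are held
 * in stop_machine, so it can disable the CPU and hand over to the idle
 * task without further locking.
 */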
/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
				param->hcpu);

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Force idle task to run as soon as we yield: it should
	   immediately notice cpu is offline and die quickly. */
	sched_idle_next();
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	struct task_struct *p;
	cpumask_t old_allowed, tmp;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
					hcpu, -1, &nr_calls);
	if (err == NOTIFY_BAD) {
		nr_calls--;
		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					  hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		err = -EINVAL;
		goto out_release;
	}

	/* Ensure that we are not runnable on dying cpu */
	old_allowed = current->cpus_allowed;
	cpus_setall(tmp);
	cpu_clear(cpu, tmp);
	set_cpus_allowed_ptr(current, &tmp);

	p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);

	if (IS_ERR(p) || cpu_online(cpu)) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();

		if (IS_ERR(p)) {
			err = PTR_ERR(p);
			goto out_allowed;
		}
		goto out_thread;
	}

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
				    hcpu) == NOTIFY_BAD)
		BUG();

	check_for_tasks(cpu);

out_thread:
	err = kthread_stop(p);
out_allowed:
	set_cpus_allowed_ptr(current, &old_allowed);
out_release:
	cpu_hotplug_done();
	if (!err) {
		if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();
	}
	return err;
}
int __ref cpu_down(unsigned int cpu)
{
	int err = 0;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	cpu_clear(cpu, cpu_active_map);

	/*
	 * Make sure all cpus did the reschedule and are not using a stale
	 * version of cpu_active_map. This is not strictly necessary because
	 * the stop_machine() we run down the line already provides the
	 * required synchronization. But it's really a side effect and we
	 * do not want to depend on the innards of stop_machine here.
	 */
	synchronize_sched();

	err = _cpu_down(cpu, 0);

	if (cpu_online(cpu))
		cpu_set(cpu, cpu_active_map);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
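
/*
 * Note (caller context, outside this file): the sysfs "online"
 * attribute under /sys/devices/system/cpu/cpuN/ is the usual way
 * userspace reaches cpu_down(); the export above also allows modules
 * to offline CPUs directly.
 */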

#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
							-1, &nr_calls);
	if (ret == NOTIFY_BAD) {
		nr_calls--;
		printk("%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__raw_notifier_call_chain(&cpu_chain,
				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_isset(cpu, cpu_possible_map)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) || defined(CONFIG_S390)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

	if (cpu_online(cpu))
		cpu_set(cpu, cpu_active_map);

out:
	cpu_maps_update_done();
	return err;
}

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = first_cpu(cpu_online_map);
	/* We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpus_clear(frozen_cpus);
	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error) {
			cpu_set(cpu, frozen_cpus);
			printk("CPU%d is down\n", cpu);
		} else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}
	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
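
/*
 * Caller context (outside this file): the suspend/hibernate core calls
 * disable_nonboot_cpus() before entering a sleep state and
 * enable_nonboot_cpus() on resume, hence the CONFIG_PM_SLEEP_SMP guard;
 * frozen_cpus records exactly which CPUs to bring back up.
 */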
void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpus_empty(frozen_cpus))
		goto out;

	printk("Enabling non-boot CPUs ...\n");
	for_each_cpu_mask_nr(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk("CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}
	cpus_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */