/*
 * Common SMP CPU bringup / teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"
#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct * __cpuinit idle_thread_get(unsigned int cpu)
{
        struct task_struct *tsk = per_cpu(idle_threads, cpu);

        if (!tsk)
                return ERR_PTR(-ENOMEM);
        init_idle(tsk, cpu);
        return tsk;
}

void __init idle_thread_set_boot_cpu(void)
{
        per_cpu(idle_threads, smp_processor_id()) = current;
}

/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu:	The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */
static inline void idle_init(unsigned int cpu)
{
        struct task_struct *tsk = per_cpu(idle_threads, cpu);

        if (!tsk) {
                tsk = fork_idle(cpu);
                if (IS_ERR(tsk))
                        pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
                else
                        per_cpu(idle_threads, cpu) = tsk;
        }
}

/**
 * idle_threads_init - Initialize idle threads for all cpus
 */
void __init idle_threads_init(void)
{
        unsigned int cpu, boot_cpu;

        boot_cpu = smp_processor_id();

        for_each_possible_cpu(cpu) {
                if (cpu != boot_cpu)
                        idle_init(cpu);
        }
}
#endif
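
/*
 * A minimal usage sketch (an illustrative assumption, not part of this
 * file): with CONFIG_GENERIC_SMP_IDLE_THREAD enabled, the hotplug core
 * is expected to fetch the cached idle task from here and hand it to
 * the architecture's bringup entry point, roughly:
 *
 *	struct task_struct *idle = idle_thread_get(cpu);
 *
 *	if (IS_ERR(idle))
 *		return PTR_ERR(idle);
 *	ret = __cpu_up(cpu, idle);
 */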

#endif /* #ifdef CONFIG_SMP */

static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
        unsigned int cpu;
        unsigned int status;
        struct smp_hotplug_thread *ht;
};

enum {
        HP_THREAD_NONE = 0,
        HP_THREAD_ACTIVE,
        HP_THREAD_PARKED,
};
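
/*
 * Descriptive note (derived from smpboot_thread_fn() below): each
 * thread starts out as HP_THREAD_NONE, becomes HP_THREAD_ACTIVE once
 * ht->setup() has run, and then toggles between HP_THREAD_PARKED and
 * HP_THREAD_ACTIVE via ht->park()/ht->unpark() as its CPU goes
 * offline and comes back online.
 */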

/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread is told to stop; otherwise it loops
 * forever.
 */
static int smpboot_thread_fn(void *data)
{
        struct smpboot_thread_data *td = data;
        struct smp_hotplug_thread *ht = td->ht;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                preempt_disable();
                if (kthread_should_stop()) {
                        set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->cleanup)
                                ht->cleanup(td->cpu, cpu_online(td->cpu));
                        kfree(td);
                        return 0;
                }

                if (kthread_should_park()) {
                        __set_current_state(TASK_RUNNING);
                        preempt_enable();
                        if (ht->park && td->status == HP_THREAD_ACTIVE) {
                                BUG_ON(td->cpu != smp_processor_id());
                                ht->park(td->cpu);
                                td->status = HP_THREAD_PARKED;
                        }
                        kthread_parkme();
                        /* We might have been woken for stop */
                        continue;
                }

                BUG_ON(td->cpu != smp_processor_id());

                /* Check for state change setup */
                switch (td->status) {
                case HP_THREAD_NONE:
                        preempt_enable();
                        if (ht->setup)
                                ht->setup(td->cpu);
                        td->status = HP_THREAD_ACTIVE;
                        preempt_disable();
                        break;
                case HP_THREAD_PARKED:
                        preempt_enable();
                        if (ht->unpark)
                                ht->unpark(td->cpu);
                        td->status = HP_THREAD_ACTIVE;
                        preempt_disable();
                        break;
                }

                if (!ht->thread_should_run(td->cpu)) {
                        preempt_enable();
                        schedule();
                } else {
                        set_current_state(TASK_RUNNING);
                        preempt_enable();
                        ht->thread_fn(td->cpu);
                }
        }
}

static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
        struct smpboot_thread_data *td;

        if (tsk)
                return 0;

        td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
        if (!td)
                return -ENOMEM;
        td->cpu = cpu;
        td->ht = ht;

        tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
                                    ht->thread_comm);
        if (IS_ERR(tsk)) {
                kfree(td);
                return PTR_ERR(tsk);
        }
        get_task_struct(tsk);
        *per_cpu_ptr(ht->store, cpu) = tsk;
        return 0;
}

int smpboot_create_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;
        int ret = 0;

        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry(cur, &hotplug_threads, list) {
                ret = __smpboot_create_thread(cur, cpu);
                if (ret)
                        break;
        }
        mutex_unlock(&smpboot_threads_lock);
        return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

        kthread_unpark(tsk);
}

void smpboot_unpark_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;

        mutex_lock(&smpboot_threads_lock);
        list_for_each_entry(cur, &hotplug_threads, list)
                smpboot_unpark_thread(cur, cpu);
        mutex_unlock(&smpboot_threads_lock);
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

        if (tsk)
                kthread_park(tsk);
}

void smpboot_park_threads(unsigned int cpu)
{
        struct smp_hotplug_thread *cur;

        mutex_lock(&smpboot_threads_lock);
        /* Park in reverse order of registration */
        list_for_each_entry_reverse(cur, &hotplug_threads, list)
                smpboot_park_thread(cur, cpu);
        mutex_unlock(&smpboot_threads_lock);
}

static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
        unsigned int cpu;

        /* We also need to destroy the parked threads of offline cpus */
        for_each_possible_cpu(cpu) {
                struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

                if (tsk) {
                        kthread_stop(tsk);
                        put_task_struct(tsk);
                        *per_cpu_ptr(ht->store, cpu) = NULL;
                }
        }
}

/**
 * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Creates and starts the threads on all online cpus.
 */
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
        unsigned int cpu;
        int ret = 0;

        mutex_lock(&smpboot_threads_lock);
        for_each_online_cpu(cpu) {
                ret = __smpboot_create_thread(plug_thread, cpu);
                if (ret) {
                        smpboot_destroy_threads(plug_thread);
                        goto out;
                }
                smpboot_unpark_thread(plug_thread, cpu);
        }
        list_add(&plug_thread->list, &hotplug_threads);
out:
        mutex_unlock(&smpboot_threads_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);

/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
        get_online_cpus();
        mutex_lock(&smpboot_threads_lock);
        list_del(&plug_thread->list);
        smpboot_destroy_threads(plug_thread);
        mutex_unlock(&smpboot_threads_lock);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
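
/*
 * Usage sketch (a hypothetical client, not part of this file; all
 * "example_*" names, including the example_work_pending() and
 * example_do_work() helpers, are assumptions for illustration). A
 * client supplies per-cpu task storage plus its callbacks in a
 * struct smp_hotplug_thread and registers it once; this
 * infrastructure then creates, parks and unparks the threads across
 * hotplug events:
 *
 *	static DEFINE_PER_CPU(struct task_struct *, example_task);
 *
 *	static int example_should_run(unsigned int cpu)
 *	{
 *		return example_work_pending(cpu);
 *	}
 *
 *	static void example_thread_fn(unsigned int cpu)
 *	{
 *		example_do_work(cpu);
 *	}
 *
 *	static struct smp_hotplug_thread example_threads = {
 *		.store			= &example_task,
 *		.thread_should_run	= example_should_run,
 *		.thread_fn		= example_thread_fn,
 *		.thread_comm		= "example/%u",
 *	};
 *
 *	ret = smpboot_register_percpu_thread(&example_threads);
 *	...
 *	smpboot_unregister_percpu_thread(&example_threads);
 */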