/* Copyright 2008, 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
 * GPL v2 and any later version.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>
/* This controls the threads on each CPU. */
enum stopmachine_state {
	/* Dummy starting state for thread. */
	STOPMACHINE_NONE,
	/* Awaiting everyone to be scheduled. */
	STOPMACHINE_PREPARE,
	/* Disable interrupts. */
	STOPMACHINE_DISABLE_IRQ,
	/* Run the function */
	STOPMACHINE_RUN,
	/* Exit */
	STOPMACHINE_EXIT,
};
static enum stopmachine_state state;

struct stop_machine_data {
	int (*fn)(void *);
	void *data;
	int fnret;
};

/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
static unsigned int num_threads;
static atomic_t thread_ack;
static DEFINE_MUTEX(lock);
/* setup_lock protects refcount, stop_machine_wq and stop_machine_work. */
static DEFINE_MUTEX(setup_lock);
/* Users of stop_machine. */
static int refcount;
static struct workqueue_struct *stop_machine_wq;
static struct stop_machine_data active, idle;
static const struct cpumask *active_cpus;
static void *stop_machine_work;

static void set_state(enum stopmachine_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&thread_ack, num_threads);
	smp_wmb();
	state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(void)
{
	if (atomic_dec_and_test(&thread_ack))
		set_state(state + 1);
}
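
/*
 * Note on ordering: the smp_wmb() in set_state() publishes the
 * thread_ack reset before the new state value, so a CPU spinning in
 * stop_cpu() that observes the new state decrements a freshly reset
 * counter in ack_state().
 */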

/* This is the actual function which stops the CPU.  It runs
 * in the context of a dedicated stopmachine workqueue. */
static void stop_cpu(struct work_struct *unused)
{
	enum stopmachine_state curstate = STOPMACHINE_NONE;
	struct stop_machine_data *smdata = &idle;
	int cpu = smp_processor_id();
	int err;

	if (!active_cpus) {
		if (cpu == cpumask_first(cpu_online_mask))
			smdata = &active;
	} else {
		if (cpumask_test_cpu(cpu, active_cpus))
			smdata = &active;
	}

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read stopmachine_state. */
		cpu_relax();
		if (state != curstate) {
			curstate = state;
			switch (curstate) {
			case STOPMACHINE_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case STOPMACHINE_RUN:
				/* On multiple CPUs only a single error code
				 * is needed to tell that something failed. */
				err = smdata->fn(smdata->data);
				if (err)
					smdata->fnret = err;
				break;
			default:
				break;
			}
			ack_state();
		}
	} while (curstate != STOPMACHINE_EXIT);

	local_irq_enable();
}

/* Callback for CPUs which aren't supposed to do anything. */
static int chill(void *unused)
{
	return 0;
}

int stop_machine_create(void)
{
	mutex_lock(&setup_lock);
	if (refcount)
		goto done;
	stop_machine_wq = create_rt_workqueue("kstop");
	if (!stop_machine_wq)
		goto err_out;
	stop_machine_work = alloc_percpu(struct work_struct);
	if (!stop_machine_work)
		goto err_out;
done:
	refcount++;
	mutex_unlock(&setup_lock);
	return 0;

err_out:
	if (stop_machine_wq)
		destroy_workqueue(stop_machine_wq);
	mutex_unlock(&setup_lock);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(stop_machine_create);

void stop_machine_destroy(void)
{
	mutex_lock(&setup_lock);
	refcount--;
	if (refcount)
		goto done;
	destroy_workqueue(stop_machine_wq);
	free_percpu(stop_machine_work);
done:
	mutex_unlock(&setup_lock);
}
EXPORT_SYMBOL_GPL(stop_machine_destroy);
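
/*
 * Usage sketch (illustrative, not taken from this file): a caller that
 * must not risk a later -ENOMEM can pin the "kstop" workqueue and the
 * per-cpu work items up front via the refcounted create/destroy pair;
 * my_fn and my_data below are hypothetical names.
 *
 *	int err = stop_machine_create();
 *	if (err)
 *		return err;
 *	...
 *	err = stop_machine(my_fn, my_data, NULL);
 *	...
 *	stop_machine_destroy();
 */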

int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	struct work_struct *sm_work;
	int i, ret;

	/* Set up initial state. */
	mutex_lock(&lock);
	num_threads = num_online_cpus();
	active_cpus = cpus;
	active.fn = fn;
	active.data = data;
	active.fnret = 0;
	idle.fn = chill;
	idle.data = NULL;

	set_state(STOPMACHINE_PREPARE);

	/* Schedule the stop_cpu work on all cpus: hold this CPU so one
	 * doesn't hit this CPU until we're ready. */
	get_cpu();
	for_each_online_cpu(i) {
		sm_work = per_cpu_ptr(stop_machine_work, i);
		INIT_WORK(sm_work, stop_cpu);
		queue_work_on(i, stop_machine_wq, sm_work);
	}
	/* This will release the thread on our CPU. */
	put_cpu();
	flush_workqueue(stop_machine_wq);
	ret = active.fnret;
	mutex_unlock(&lock);
	return ret;
}

int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	int ret;

	ret = stop_machine_create();
	if (ret)
		return ret;

	/* No CPUs can come up or down during this. */
	get_online_cpus();
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	stop_machine_destroy();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
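
/*
 * Caller sketch (illustrative, not taken from this file): with
 * cpus == NULL, fn runs on the first online CPU while all other online
 * CPUs spin in stop_cpu() with interrupts disabled, so fn must neither
 * sleep nor take sleeping locks; disable_foo and foo_enabled are
 * hypothetical names.
 *
 *	static bool foo_enabled = true;
 *
 *	static int disable_foo(void *unused)
 *	{
 *		foo_enabled = false;
 *		return 0;
 *	}
 *
 *	int err = stop_machine(disable_foo, NULL, NULL);
 */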