#include <linux/stop_machine.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/syscalls.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

/* Since we affect priority and affinity (both of which are visible
 * to, and settable by, outside processes) we do indirection via a
 * kthread. */

/* Thread to stop each CPU in user context. */
enum stopmachine_state {
	STOPMACHINE_WAIT,
	STOPMACHINE_PREPARE,
	STOPMACHINE_DISABLE_IRQ,
	STOPMACHINE_EXIT,
};
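
/* stop_machine() moves the stopper threads from WAIT through PREPARE to
 * DISABLE_IRQ; restart_machine() (or a failed startup) moves them to EXIT.
 * Each thread acknowledges every transition by incrementing
 * stopmachine_thread_ack, and stopmachine_set_state() waits for all
 * stopmachine_num_threads acks before returning. */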
static enum stopmachine_state stopmachine_state;
static unsigned int stopmachine_num_threads;
static atomic_t stopmachine_thread_ack;
static DECLARE_MUTEX(stopmachine_mutex);

static int stopmachine(void *cpu)
{
	int irqs_disabled = 0;
	int prepared = 0;

	set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));

	/* Ack: we are alive */
	smp_mb();	/* Theoretically the ack = 0 might not be on this CPU yet. */
	atomic_inc(&stopmachine_thread_ack);

	/* Simple state machine */
	while (stopmachine_state != STOPMACHINE_EXIT) {
		if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
		    && !irqs_disabled) {
			local_irq_disable();
			irqs_disabled = 1;
			/* Ack: irqs disabled. */
			smp_mb();	/* Must read state first. */
			atomic_inc(&stopmachine_thread_ack);
		} else if (stopmachine_state == STOPMACHINE_PREPARE
			   && !prepared) {
			/* Everyone is in place, hold CPU. */
			preempt_disable();
			prepared = 1;
			smp_mb();	/* Must read state first. */
			atomic_inc(&stopmachine_thread_ack);
		}

		/* Yield in first stage: migration threads need to
		 * help our sisters onto their CPUs. */
		if (!prepared && !irqs_disabled)
			yield();
		else
			cpu_relax();
	}

	/* Ack: we are exiting. */
	smp_mb();	/* Must read state first. */
	atomic_inc(&stopmachine_thread_ack);

	if (irqs_disabled)
		local_irq_enable();
	if (prepared)
		preempt_enable();

	return 0;
}

/* Change the thread state */
static void stopmachine_set_state(enum stopmachine_state state)
{
	atomic_set(&stopmachine_thread_ack, 0);
	smp_wmb();
	stopmachine_state = state;
	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
		cpu_relax();
}
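
/* Bring the machine to a stop: make this thread SCHED_FIFO, spawn a
 * stopmachine thread for every other online CPU, then step all of them
 * through PREPARE (hold the CPU) and DISABLE_IRQ.  Returns 0 with
 * preemption and local interrupts disabled, or a negative errno if a
 * thread could not be created (in which case it also releases
 * stopmachine_mutex, taken by __stop_machine_run()). */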
static int stop_machine(void)
{
	int i, ret = 0;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	/* One high-prio thread per cpu.  We'll do this one. */
	sched_setscheduler(current, SCHED_FIFO, &param);

	atomic_set(&stopmachine_thread_ack, 0);
	stopmachine_num_threads = 0;
	stopmachine_state = STOPMACHINE_WAIT;

	for_each_online_cpu(i) {
		if (i == raw_smp_processor_id())
			continue;
		ret = kernel_thread(stopmachine, (void *)(long)i, CLONE_KERNEL);
		if (ret < 0)
			break;
		stopmachine_num_threads++;
	}

	/* Wait for them all to come to life. */
	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
		yield();

	/* If some failed, kill them all. */
	if (ret < 0) {
		stopmachine_set_state(STOPMACHINE_EXIT);
		up(&stopmachine_mutex);
		return ret;
	}

	/* Now they are all started, make them hold the CPUs, ready. */
	preempt_disable();
	stopmachine_set_state(STOPMACHINE_PREPARE);

	/* Make them disable irqs. */
	local_irq_disable();
	stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);

	return 0;
}
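
/* Undo stop_machine(): let the stopper threads exit, then re-enable local
 * interrupts and preemption on this CPU. */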
static void restart_machine(void)
{
	stopmachine_set_state(STOPMACHINE_EXIT);
	local_irq_enable();
	preempt_enable_no_resched();
}

struct stop_machine_data {
	int (*fn)(void *);
	void *data;
	struct completion done;
};
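
/* Body of the kstopmachine thread: stop the machine, run the caller's
 * function, restart the machine, then wait around to be reaped by
 * kthread_stop() so the return value can be collected. */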
static int do_stop(void *_smdata)
{
	struct stop_machine_data *smdata = _smdata;
	int ret;

	ret = stop_machine();
	if (ret == 0) {
		ret = smdata->fn(smdata->data);
		restart_machine();
	}

	/* We're done: you can kthread_stop us now */
	complete(&smdata->done);

	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return ret;
}
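
/* Create the kstopmachine thread, bind it to @cpu (or the current CPU if
 * @cpu is NR_CPUS) and wait until it has run fn(data) with the machine
 * stopped.  The returned task must be reaped with kthread_stop() to get
 * fn's return value.  Callers are expected to exclude CPU hotplug, as
 * stop_machine_run() below does. */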
struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
				       unsigned int cpu)
{
	struct stop_machine_data smdata;
	struct task_struct *p;

	smdata.fn = fn;
	smdata.data = data;
	init_completion(&smdata.done);

	down(&stopmachine_mutex);

	/* If they don't care which CPU fn runs on, bind to any online one. */
	if (cpu == NR_CPUS)
		cpu = raw_smp_processor_id();

	p = kthread_create(do_stop, &smdata, "kstopmachine");
	if (!IS_ERR(p)) {
		kthread_bind(p, cpu);
		wake_up_process(p);
		wait_for_completion(&smdata.done);
	}
	up(&stopmachine_mutex);
	return p;
}
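
/* Public entry point: freeze every other CPU, run fn(data) on @cpu and
 * return its result, or a negative errno if the helper thread could not
 * be created.  CPU hotplug is locked out for the duration. */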
int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
{
	struct task_struct *p;
	int ret;

	/* No CPUs can come up or down during this. */
	lock_cpu_hotplug();
	p = __stop_machine_run(fn, data, cpu);
	if (!IS_ERR(p))
		ret = kthread_stop(p);
	else
		ret = PTR_ERR(p);
	unlock_cpu_hotplug();

	return ret;
}