/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        long remove_sequence;   /* Least-recently added (next to run) */
        long insert_sequence;   /* Next to add */

        struct list_head worklist;
        wait_queue_head_t more_work;
        wait_queue_head_t work_done;

        struct workqueue_struct *wq;
        struct task_struct *thread;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
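
/*
 * Illustrative walk-through (not part of the original file): suppose a
 * flusher samples insert_sequence == 5 while remove_sequence == 2.  It
 * then sleeps until remove_sequence reaches 5, i.e. until the three works
 * that were already queued have run.  Works queued after the sample push
 * insert_sequence to 6, 7, ... but the flusher's target stays at 5, which
 * is what prevents the livelock described above.
 */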

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        const char *name;
        struct list_head list;  /* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return list_empty(&wq->list);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        work->wq_data = cwq;
        list_add_tail(&work->entry, &cwq->worklist);
        cwq->insert_sequence++;
        wake_up(&cwq->more_work);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns non-zero if it was successfully added.
 *
 * We queue the work on the CPU it was submitted from, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = get_cpu();

        if (!test_and_set_bit(0, &work->pending)) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
                ret = 1;
        }
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
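
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * with its own workqueue typically embeds a work_struct in its private
 * data and queues it with the era's three-argument INIT_WORK(), as used
 * elsewhere in this file.  Names such as my_dev, my_wq and my_work_fn
 * are hypothetical.
 *
 *      static void my_work_fn(void *data)
 *      {
 *              struct my_dev *dev = data;
 *              // runs in process context, may sleep
 *      }
 *
 *      INIT_WORK(&dev->work, my_work_fn, dev);
 *      queue_work(my_wq, &dev->work);
 */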

static void delayed_work_timer_fn(unsigned long __data)
{
        struct work_struct *work = (struct work_struct *)__data;
        struct workqueue_struct *wq = work->wq_data;
        int cpu = smp_processor_id();

        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;

        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @work: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns non-zero if it was successfully added.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct work_struct *work, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &work->timer;

        if (!test_and_set_bit(0, &work->pending)) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                work->wq_data = wq;
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)work;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
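
/*
 * Usage sketch (illustrative, not part of the original file): @delay is
 * in jiffies, so callers normally derive it from HZ.  For example, to run
 * a hypothetical my_poll_fn() on my_wq roughly half a second from now:
 *
 *      INIT_WORK(&dev->poll_work, my_poll_fn, dev);
 *      queue_delayed_work(my_wq, &dev->poll_work, HZ / 2);
 */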

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns non-zero if it was successfully added.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct work_struct *work, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &work->timer;

        if (!test_and_set_bit(0, &work->pending)) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                work->wq_data = wq;
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)work;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
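
/*
 * Usage sketch (illustrative, not part of the original file): useful when
 * the handler touches per-CPU state and should therefore be queued on a
 * particular CPU, e.g. refreshing a hypothetical per-CPU statistics block
 * every second:
 *
 *      queue_delayed_work_on(cpu, my_wq, per_cpu_ptr(stats_work, cpu), HZ);
 *
 * Note that add_timer_on() only controls where the work is queued; as
 * with queue_work(), execution on that CPU is not guaranteed.
 */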

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        unsigned long flags;

        /*
         * Keep taking off work from the queue until
         * done.
         */
        spin_lock_irqsave(&cwq->lock, flags);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                void (*f) (void *) = work->func;
                void *data = work->data;

                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);

                BUG_ON(work->wq_data != cwq);
                clear_bit(0, &work->pending);
                f(data);

                spin_lock_irqsave(&cwq->lock, flags);
                cwq->remove_sequence++;
                wake_up(&cwq->work_done);
        }
        cwq->run_depth--;
        spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DECLARE_WAITQUEUE(wait, current);
        struct k_sigaction sa;
        sigset_t blocked;

        current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
                else
                        __set_current_state(TASK_RUNNING);
                remove_wait_queue(&cwq->more_work, &wait);

                if (!list_empty(&cwq->worklist))
                        run_workqueue(cwq);
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
        } else {
                DEFINE_WAIT(wait);
                long sequence_needed;

                spin_lock_irq(&cwq->lock);
                sequence_needed = cwq->insert_sequence;

                while (sequence_needed - cwq->remove_sequence > 0) {
                        prepare_to_wait(&cwq->work_done, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        spin_unlock_irq(&cwq->lock);
                        schedule();
                        spin_lock_irq(&cwq->lock);
                }
                finish_wait(&cwq->work_done, &wait);
                spin_unlock_irq(&cwq->lock);
        }
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number and
 * will sleep until the head sequence is greater than or equal to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        might_sleep();

        if (is_single_threaded(wq)) {
                /* Always use first cpu's area. */
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
        } else {
                int cpu;

                lock_cpu_hotplug();
                for_each_online_cpu(cpu)
                        flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
                unlock_cpu_hotplug();
        }
}
EXPORT_SYMBOL_GPL(flush_workqueue);
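
/*
 * Usage sketch (illustrative, not part of the original file): the typical
 * shutdown pattern is to stop new submissions, cancel any pending timers,
 * and then flush so no handler can still be running when resources go
 * away.  Names are hypothetical.
 *
 *      static void my_dev_shutdown(struct my_dev *dev)
 *      {
 *              cancel_delayed_work(&dev->poll_work);
 *              flush_workqueue(my_wq);
 *              // now no my_dev work is queued or executing
 *      }
 */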

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
                                                   int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct task_struct *p;

        spin_lock_init(&cwq->lock);
        cwq->wq = wq;
        cwq->thread = NULL;
        cwq->insert_sequence = 0;
        cwq->remove_sequence = 0;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);
        init_waitqueue_head(&cwq->work_done);

        if (is_single_threaded(wq))
                p = kthread_create(worker_thread, cwq, "%s", wq->name);
        else
                p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
        if (IS_ERR(p))
                return NULL;
        cwq->thread = p;
        return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread)
{
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
        struct task_struct *p;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        /* We don't need the distraction of CPUs appearing and vanishing. */
        lock_cpu_hotplug();
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
                p = create_workqueue_thread(wq, singlethread_cpu);
                if (!p)
                        destroy = 1;
                else
                        wake_up_process(p);
        } else {
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);
                for_each_online_cpu(cpu) {
                        p = create_workqueue_thread(wq, cpu);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
                        } else
                                destroy = 1;
                }
        }
        unlock_cpu_hotplug();

        /*
         * Was there any error during startup? If yes then clean up:
         */
        if (destroy) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
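
/*
 * Usage sketch (illustrative, not part of the original file): callers do
 * not normally invoke __create_workqueue() directly; they use the
 * create_workqueue() / create_singlethread_workqueue() wrappers from
 * <linux/workqueue.h> (this file itself uses create_workqueue() in
 * init_workqueues() below).  A typical module lifecycle, with
 * hypothetical names:
 *
 *      my_wq = create_singlethread_workqueue("mydrv");
 *      if (!my_wq)
 *              return -ENOMEM;
 *      ...
 *      flush_workqueue(my_wq);
 *      destroy_workqueue(my_wq);
 */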

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq;
        unsigned long flags;
        struct task_struct *p;

        cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        spin_lock_irqsave(&cwq->lock, flags);
        p = cwq->thread;
        cwq->thread = NULL;
        spin_unlock_irqrestore(&cwq->lock, flags);
        if (p)
                kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        int cpu;

        flush_workqueue(wq);

        /* We don't need the distraction of CPUs appearing and vanishing. */
        lock_cpu_hotplug();
        if (is_single_threaded(wq))
                cleanup_workqueue_thread(wq, singlethread_cpu);
        else {
                for_each_online_cpu(cpu)
                        cleanup_workqueue_thread(wq, cpu);
                spin_lock(&workqueue_lock);
                list_del(&wq->list);
                spin_unlock(&workqueue_lock);
        }
        unlock_cpu_hotplug();
        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
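
/*
 * Usage sketch (illustrative, not part of the original file): the classic
 * pattern is deferring sleepable processing out of an interrupt handler
 * into keventd.  Names are hypothetical; the handler signature is the one
 * used by drivers of this kernel generation.
 *
 *      static irqreturn_t my_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
 *      {
 *              struct my_dev *dev = dev_id;
 *
 *              // ack the hardware here, then defer the slow part
 *              schedule_work(&dev->work);
 *              return IRQ_HANDLED;
 *      }
 */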

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @work: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
        return queue_delayed_work(keventd_wq, work, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
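
/*
 * Usage sketch (illustrative, not part of the original file): a one-shot
 * deferral on the global queue, e.g. retrying a hypothetical link check
 * two seconds from now:
 *
 *      schedule_delayed_work(&dev->link_check_work, 2 * HZ);
 */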

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @work: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct work_struct *work, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, work, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 * @info: a pointer to pass to func()
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(void (*func)(void *info), void *info)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        for_each_online_cpu(cpu) {
                INIT_WORK(per_cpu_ptr(works, cpu), func, info);
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
                                per_cpu_ptr(works, cpu));
        }
        flush_workqueue(keventd_wq);
        free_percpu(works);
        return 0;
}
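
/*
 * Usage sketch (illustrative, not part of the original file): handy when
 * every CPU has private state that must be drained in process context,
 * e.g. flushing a hypothetical per-CPU cache:
 *
 *      static void drain_local_cache(void *unused)
 *      {
 *              // runs on each online CPU, from that CPU's keventd thread
 *      }
 *
 *      err = schedule_on_each_cpu(drain_local_cache, NULL);
 *
 * The call does not return until every per-CPU invocation has completed,
 * because it flushes keventd_wq before freeing the per-cpu work items.
 */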

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *                      work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                       struct work_struct *work)
{
        while (!cancel_delayed_work(work))
                flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *                      work whose handler rearms the delayed work.
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_work(struct work_struct *work)
{
        cancel_rearming_delayed_workqueue(keventd_wq, work);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
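
/*
 * Usage sketch (illustrative, not part of the original file): the
 * self-rearming pattern these helpers exist for, with hypothetical names:
 *
 *      static void my_poll_fn(void *data)
 *      {
 *              struct my_dev *dev = data;
 *
 *              // do the periodic work ...
 *              schedule_delayed_work(&dev->poll_work, HZ);     // rearm
 *      }
 *
 * A plain cancel_delayed_work() can lose the race against the rearm, so
 * teardown uses:
 *
 *      cancel_rearming_delayed_work(&dev->poll_work);
 */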

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:         the function to execute
 * @data:       data to pass to the function
 * @ew:         guaranteed storage for the execute work structure (must
 *              be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(void (*fn)(void *data), void *data,
                               struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(data);
                return 0;
        }

        INIT_WORK(&ew->work, fn, data);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
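
/*
 * Usage sketch (illustrative, not part of the original file): callers that
 * may run either in process context or from an interrupt can funnel
 * cleanup through this helper, as long as @ew lives at least until the
 * deferred work has run (e.g. it is embedded in the object itself):
 *
 *      execute_in_process_context(my_release_fn, dev, &dev->ew);
 *
 * In process context my_release_fn() runs immediately; from interrupt
 * context it is handed to keventd via schedule_work().
 */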

int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct list_head list;
        struct work_struct *work;

        spin_lock_irq(&cwq->lock);
        list_replace_init(&cwq->worklist, &list);

        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
                work = list_entry(list.next, struct work_struct, entry);
                list_del(&work->entry);
                __queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
        }
        spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        unsigned int hotcpu = (unsigned long)hcpu;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_UP_PREPARE:
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
                        if (!create_workqueue_thread(wq, hotcpu)) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }
                }
                break;

        case CPU_ONLINE:
                /* Kick off worker threads. */
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq;

                        cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
                        kthread_bind(cwq->thread, hotcpu);
                        wake_up_process(cwq->thread);
                }
                break;

        case CPU_UP_CANCELED:
                list_for_each_entry(wq, &workqueues, list) {
                        if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
                                continue;
                        /* Unbind so it can run. */
                        kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
                                     any_online_cpu(cpu_online_map));
                        cleanup_workqueue_thread(wq, hotcpu);
                }
                break;

        case CPU_DEAD:
                list_for_each_entry(wq, &workqueues, list)
                        cleanup_workqueue_thread(wq, hotcpu);
                list_for_each_entry(wq, &workqueues, list)
                        take_over_work(wq, hotcpu);
                break;
        }

        return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
        singlethread_cpu = first_cpu(cpu_possible_map);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}