/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via keventd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <asm/semaphore.h>
/*
 * We don't want to execute off keventd since it might
 * hold a semaphore our callers hold too:
 */
static struct workqueue_struct *helper_wq;

struct kthread_create_info
{
	/* Information passed to kthread() from keventd. */
	int (*threadfn)(void *data);
	void *data;
	struct completion started;

	/* Result passed back to kthread_create() from keventd. */
	struct task_struct *result;
	struct completion done;
};

struct kthread_stop_info
{
	struct task_struct *k;
	int err;
	struct completion done;
};

/* Thread stopping is done by setting this var: lock serializes
 * multiple kthread_stop calls. */
static DEFINE_MUTEX(kthread_stop_lock);
static struct kthread_stop_info kthread_stop_info;
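/**
 * kthread_should_stop - should this kthread return now?
 *
 * Returns non-zero once kthread_stop() has been called for the current
 * thread; threadfn() should then clean up and return.
 */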
int kthread_should_stop(void)
{
	return (kthread_stop_info.k == current);
}
EXPORT_SYMBOL(kthread_should_stop);
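/*
 * Switch the new thread over to init's fs_struct and files_struct,
 * dropping the copies inherited from whoever spawned us (keventd or a
 * direct caller), so the kthread doesn't keep references to their
 * cwd/root or open files.
 */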
static void kthread_exit_files(void)
{
	struct fs_struct *fs;
	struct task_struct *tsk = current;

	exit_fs(tsk);		/* current->fs->count--; */
	fs = init_task.fs;
	tsk->fs = fs;
	atomic_inc(&fs->count);
	exit_files(tsk);
	current->files = init_task.files;
	atomic_inc(&tsk->files->count);
}
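/*
 * kthread() is the function every new kernel thread starts in.  It copies
 * threadfn/data off the create structure (which lives on the spawner's
 * stack), blocks all signals, then sleeps until the kthread_create()
 * caller wakes it; threadfn() runs unless a stop was already requested.
 */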
static int kthread(void *_create)
{
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data);
	void *data;
	sigset_t blocked;
	int ret = -EINTR;

	kthread_exit_files();

	/* Copy data: it's on keventd's stack */
	threadfn = create->threadfn;
	data = create->data;

	/* Block and flush all signals (in case we're not from keventd). */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* By default we can run anywhere, unlike keventd. */
	set_cpus_allowed(current, CPU_MASK_ALL);

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_INTERRUPTIBLE);
	complete(&create->started);
	schedule();

	if (!kthread_should_stop())
		ret = threadfn(data);

	/* It might have exited on its own, w/o kthread_stop.  Check. */
	if (kthread_should_stop()) {
		kthread_stop_info.err = ret;
		complete(&kthread_stop_info.done);
	}
	return 0;
}
/* We are keventd: create a thread. */
static void keventd_create_kthread(void *_create)
{
	struct kthread_create_info *create = _create;
	int pid;

	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
	} else {
		wait_for_completion(&create->started);
		create->result = find_task_by_pid(pid);
	}
	complete(&create->done);
}
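/**
 * kthread_create - create a kernel thread.
 * @threadfn: function to run until it returns or kthread_should_stop()
 *	      is true.
 * @data: data pointer for @threadfn.
 * @namefmt: printf-style name for the thread (copied into ->comm).
 *
 * The new thread is created sleeping in TASK_INTERRUPTIBLE; the caller
 * may bind it with kthread_bind() and must start it with
 * wake_up_process(), or may stop it without it ever running via
 * kthread_stop().  Returns a task_struct pointer, or an ERR_PTR() on
 * failure.
 *
 * Typical use (sketch; mythread_fn and mydata are placeholders):
 *
 *	struct task_struct *t = kthread_create(mythread_fn, mydata, "mythread");
 *	if (!IS_ERR(t))
 *		wake_up_process(t);
 *	...
 *	int ret = kthread_stop(t);
 */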
struct task_struct *kthread_create(int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;
	DECLARE_WORK(work, keventd_create_kthread, &create);

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.started);
	init_completion(&create.done);

	/*
	 * The workqueue needs to start up first:
	 */
	if (!helper_wq)
		work.func(work.data);
	else {
		queue_work(helper_wq, &work);
		wait_for_completion(&create.done);
	}
	if (!IS_ERR(create.result)) {
		va_list args;
		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create);
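/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @k: thread created by kthread_create(), not yet woken.
 * @cpu: cpu @k should run on.
 *
 * Because @k is still sleeping in TASK_INTERRUPTIBLE and off the
 * runqueue, we can set its cpu and cpus_allowed directly instead of
 * going through set_cpus_allowed().
 */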
void kthread_bind(struct task_struct *k, unsigned int cpu)
{
	BUG_ON(k->state != TASK_INTERRUPTIBLE);
	/* Must have done schedule() in kthread() before we set_task_cpu */
	wait_task_inactive(k);
	set_task_cpu(k, cpu);
	k->cpus_allowed = cpumask_of_cpu(cpu);
}
EXPORT_SYMBOL(kthread_bind);
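/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it and waits
 * for it to exit.  Returns the result of threadfn(), or -EINTR if
 * threadfn() never ran (i.e. the thread was stopped before being woken).
 */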
int kthread_stop(struct task_struct *k)
{
	return kthread_stop_sem(k, NULL);
}
EXPORT_SYMBOL(kthread_stop);
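/**
 * kthread_stop_sem - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 * @s: semaphore @k sleeps on while idle, or NULL.
 *
 * Same as kthread_stop(), except that when @s is non-NULL the thread is
 * woken with up(@s) rather than wake_up_process(), for threads that
 * block in down() instead of schedule().
 */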
int kthread_stop_sem(struct task_struct *k, struct semaphore *s)
{
	int ret;
	mutex_lock(&kthread_stop_lock);
	/* It could exit after stop_info.k set, but before wake_up_process. */
	get_task_struct(k);

	/* Must init completion *before* thread sees kthread_stop_info.k */
	init_completion(&kthread_stop_info.done);
	smp_wmb();
	/* Now set kthread_should_stop() to true, and wake it up. */
	kthread_stop_info.k = k;
	if (s)
		up(s);
	else
		wake_up_process(k);
	put_task_struct(k);

	/* Once it dies, reset stop ptr, gather result and we're done. */
	wait_for_completion(&kthread_stop_info.done);
	kthread_stop_info.k = NULL;
	ret = kthread_stop_info.err;
	mutex_unlock(&kthread_stop_lock);
	return ret;
}
EXPORT_SYMBOL(kthread_stop_sem);
static __init int helper_init(void)
{
	helper_wq = create_singlethread_workqueue("kthread");
	BUG_ON(!helper_wq);

	return 0;
}
core_initcall(helper_init);