/*
	kmod, the new module loader (replaces kerneld)
	Kirk Petersen

	Reorganized not to be a daemon by Adam Richter, with guidance
	from Greg Zornetzer.

	Modified to avoid chroot and file sharing problems.
	Mikael Pettersson

	Limit the concurrent number of kmod modprobes to catch loops from
	"modprobe needs a service that is in a module".
	Keith Owens <kaos@ocs.com.au> December 1999

	Unblock all signals when we exec a usermode process.
	Shuu Yamaguchi <shuu@wondernetworkresources.com> December 2000

	call_usermodehelper wait flag, and remove exec_usermodehelper.
	Rusty Russell <rusty@rustcorp.com.au>  Jan 2003
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/mnt_namespace.h>
#include <linux/completion.h>
#include <linux/file.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <asm/uaccess.h>
extern int max_threads;

static struct workqueue_struct *khelper_wq;

#ifdef CONFIG_KMOD

/*
	modprobe_path is set via /proc/sys/kernel/modprobe.
*/
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";

/**
 * request_module - try to load a kernel module
 * @fmt: printf style format string for the name of the module
 * @varargs: arguments as specified in the format string
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code on failure. Note that a
 * successful module load does not mean the module did not then unload
 * and exit on an error of its own. Callers must check that the service
 * they requested is now available, not blindly invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int request_module(const char *fmt, ...)
{
	va_list args;
	char module_name[MODULE_NAME_LEN];
	unsigned int max_modprobes;
	int ret;
	char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
	static char *envp[] = { "HOME=/",
				"TERM=linux",
				"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
				NULL };
	static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
	static int kmod_loop_msg;

	va_start(args, fmt);
	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
	va_end(args);
	if (ret >= MODULE_NAME_LEN)
		return -ENAMETOOLONG;

	/* If modprobe needs a service that is in a module, we get a
	 * recursive loop.  Limit the number of running kmod threads to
	 * max_threads/2 or MAX_KMOD_CONCURRENT, whichever is the smaller.
	 * A cleaner method would be to run the parents of this process,
	 * counting how many times kmod was invoked.  That would mean
	 * accessing the internals of the process tables to get the command
	 * line, proc_pid_cmdline is static and it is not worth changing the
	 * proc code just to handle this case.
	 * KAO.
	 *
	 * "trace the ppid" is simple, but will fail if someone's
	 * parent exits.  I think this is as good as it gets. --RR
	 */
	max_modprobes = min(max_threads/2, MAX_KMOD_CONCURRENT);
	atomic_inc(&kmod_concurrent);
	if (atomic_read(&kmod_concurrent) > max_modprobes) {
		/* We may be blaming an innocent here, but unlikely */
		if (kmod_loop_msg++ < 5)
			printk(KERN_ERR
			       "request_module: runaway loop modprobe %s\n",
			       module_name);
		atomic_dec(&kmod_concurrent);
		return -ENOMEM;
	}

	ret = call_usermodehelper(modprobe_path, argv, envp, 1);
	atomic_dec(&kmod_concurrent);
	return ret;
}
EXPORT_SYMBOL(request_module);
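
/*
 * Usage sketch (illustrative, not part of this file): a caller formats a
 * module name or alias, requests it, and then re-checks that the service
 * actually appeared, since a zero return only means modprobe itself ran
 * successfully.  my_find_driver() and the "my-proto-%d" alias below are
 * hypothetical stand-ins for a real lookup and alias convention.
 *
 *	static struct my_driver *my_get_driver(int proto)
 *	{
 *		struct my_driver *drv = my_find_driver(proto);
 *
 *		if (!drv && request_module("my-proto-%d", proto) == 0)
 *			drv = my_find_driver(proto);
 *		return drv;
 *	}
 */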

#endif /* CONFIG_KMOD */

struct subprocess_info {
	struct work_struct work;
	struct completion *complete;
	char *path;
	char **argv;
	char **envp;
	struct key *ring;
	int wait;
	int retval;
	struct file *stdin;
};

/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	struct key *new_session, *old_session;
	int retval;

	/* Unblock all signals and set the session keyring. */
	new_session = key_get(sub_info->ring);
	flush_signals(current);
	spin_lock_irq(&current->sighand->siglock);
	old_session = __install_session_keyring(current, new_session);
	flush_signal_handlers(current, 1);
	sigemptyset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	key_put(old_session);

	/* Install input pipe when needed */
	if (sub_info->stdin) {
		struct files_struct *f = current->files;
		struct fdtable *fdt;
		/* no races because files should be private here */
		sys_close(0);
		fd_install(0, sub_info->stdin);
		spin_lock(&f->file_lock);
		fdt = files_fdtable(f);
		FD_SET(0, fdt->open_fds);
		FD_CLR(0, fdt->close_on_exec);
		spin_unlock(&f->file_lock);

		/* and disallow core files too */
		current->signal->rlim[RLIMIT_CORE] = (struct rlimit){0, 0};
	}

	/* We can run anywhere, unlike our parent keventd(). */
	set_cpus_allowed(current, CPU_MASK_ALL);

	retval = -EPERM;
	if (current->fs->root)
		retval = kernel_execve(sub_info->path,
				sub_info->argv, sub_info->envp);

	/* Exec failed? */
	sub_info->retval = retval;
	do_exit(0);
}

/* Keventd can't block, but this (a child) can.  */
static int wait_for_helper(void *data)
{
	struct subprocess_info *sub_info = data;
	pid_t pid;
	struct k_sigaction sa;

	/* Install a handler: if SIGCLD isn't handled sys_wait4 won't
	 * populate the status, but will return -ECHILD. */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, NULL);
	allow_signal(SIGCHLD);

	pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		int ret;

		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But wait_for_helper() always runs as keventd, and put_user()
		 * to a kernel address works OK for kernel threads, due to
		 * their having an mm_segment_t which spans the entire address
		 * space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		sys_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either ____call_usermodehelper failed and the
		 * real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	if (sub_info->wait < 0)
		kfree(sub_info);
	else
		complete(sub_info->complete);
	return 0;
}

/* This is run by khelper thread  */
static void __call_usermodehelper(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);
	pid_t pid;
	int wait = sub_info->wait;

	/* CLONE_VFORK: wait until the usermode helper has execve'd
	 * successfully.  We need the data structures to stay around
	 * until that is done.  */
	if (wait)
		pid = kernel_thread(wait_for_helper, sub_info,
				    CLONE_FS | CLONE_FILES | SIGCHLD);
	else
		pid = kernel_thread(____call_usermodehelper, sub_info,
				    CLONE_VFORK | SIGCHLD);

	if (wait < 0)
		return;

	if (pid < 0) {
		sub_info->retval = pid;
		complete(sub_info->complete);
	} else if (!wait)
		complete(sub_info->complete);
}

/**
 * call_usermodehelper_keys - start a usermode application
 * @path: pathname for the application
 * @argv: null-terminated argument list
 * @envp: null-terminated environment list
 * @session_keyring: session keyring for process (NULL for an empty keyring)
 * @wait: wait for the application to finish and return status.
 *        when -1 don't wait at all, but you get no useful error back when
 *        the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of keventd.
 * (ie. it runs with full root capabilities).
 *
 * Must be called from process context.  Returns a negative error code
 * if the program was not execed successfully, or 0.
 */
int call_usermodehelper_keys(char *path, char **argv, char **envp,
			     struct key *session_keyring, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct subprocess_info *sub_info;
	int retval;

	if (!khelper_wq)
		return -EBUSY;

	if (path[0] == '\0')
		return 0;

	sub_info = kzalloc(sizeof(struct subprocess_info), GFP_ATOMIC);
	if (!sub_info)
		return -ENOMEM;

	INIT_WORK(&sub_info->work, __call_usermodehelper);
	sub_info->complete = &done;
	sub_info->path = path;
	sub_info->argv = argv;
	sub_info->envp = envp;
	sub_info->ring = session_keyring;
	sub_info->wait = wait;

	queue_work(khelper_wq, &sub_info->work);
	if (wait < 0) /* task has freed sub_info */
		return 0;
	wait_for_completion(&done);
	retval = sub_info->retval;
	kfree(sub_info);
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_keys);
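
/*
 * Usage sketch (illustrative, not part of this file): most callers go
 * through the call_usermodehelper() wrapper in <linux/kmod.h>, which
 * hands a NULL session keyring to this function (request_module() above
 * does exactly that).  The helper path, its argument and the minimal
 * environment below are hypothetical examples.
 *
 *	static int my_run_helper(const char *devname)
 *	{
 *		char *argv[] = { "/sbin/my-helper", (char *)devname, NULL };
 *		char *envp[] = { "HOME=/",
 *				 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
 *				 NULL };
 *
 *		return call_usermodehelper(argv[0], argv, envp, 0);
 *	}
 *
 * With wait == 1 the call blocks until the helper exits and returns its
 * exit status, wait == 0 returns once the exec has been attempted, and
 * wait == -1 only queues the helper and reports no useful error.
 */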

int call_usermodehelper_pipe(char *path, char **argv, char **envp,
			     struct file **filp)
{
	DECLARE_COMPLETION(done);
	struct subprocess_info sub_info = {
		.work		= __WORK_INITIALIZER(sub_info.work,
						     __call_usermodehelper),
		.complete	= &done,
		.path		= path,
		.argv		= argv,
		.envp		= envp,
		.retval		= 0,
	};
	struct file *f;

	if (!khelper_wq)
		return -EBUSY;

	if (path[0] == '\0')
		return 0;

	f = create_write_pipe();
	if (IS_ERR(f))
		return PTR_ERR(f);
	*filp = f;

	f = create_read_pipe(f);
	if (IS_ERR(f)) {
		free_write_pipe(*filp);
		return PTR_ERR(f);
	}
	sub_info.stdin = f;

	queue_work(khelper_wq, &sub_info.work);
	wait_for_completion(&done);
	return sub_info.retval;
}
EXPORT_SYMBOL(call_usermodehelper_pipe);
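
/*
 * Usage sketch (illustrative, not part of this file): the caller gets the
 * write side of the pipe back in *filp while the helper sees the read side
 * as its stdin, so kernel code can start the program, stream data into the
 * pipe and then close it.  The helper path and my_emit(), a stand-in for
 * a real writer such as a vfs_write()-based loop, are hypothetical.
 *
 *	static int my_stream_to_helper(const char *buf, size_t len)
 *	{
 *		char *argv[] = { "/usr/sbin/my-collector", NULL };
 *		struct file *pipe;
 *		int err;
 *
 *		err = call_usermodehelper_pipe(argv[0], argv, NULL, &pipe);
 *		if (err)
 *			return err;
 *		my_emit(pipe, buf, len);
 *		return filp_close(pipe, NULL);
 *	}
 */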

void __init usermodehelper_init(void)
{
	khelper_wq = create_singlethread_workqueue("khelper");
	BUG_ON(!khelper_wq);
}