/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple threads pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */
# include <linux/linkage.h>
# include <linux/sched.h>
# include <linux/errno.h>
# include <linux/net.h>
# include <linux/in.h>
# include <linux/mm.h>
2006-10-02 13:17:59 +04:00
# include <linux/interrupt.h>
# include <linux/module.h>
2008-01-30 15:33:11 +03:00
# include <linux/sched.h>
2005-04-17 02:20:36 +04:00
# include <linux/sunrpc/types.h>
# include <linux/sunrpc/xdr.h>
# include <linux/sunrpc/stats.h>
# include <linux/sunrpc/svcsock.h>
# include <linux/sunrpc/clnt.h>
# define RPCDBG_FACILITY RPCDBG_SVCDSP
/* A serv is "pooled" iff it was created via svc_create_pooled() */
#define svc_serv_is_pooled(serv)    ((serv)->sv_function)

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};

#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL
/*
* Structure for mapping cpus to pools and vice versa .
* Setup once during sunrpc initialisation .
*/
static struct svc_pool_map {
2007-03-06 12:42:23 +03:00
int count ; /* How many svc_servs use us */
2006-10-02 13:18:01 +04:00
int mode ; /* Note: int not enum to avoid
* warnings about " enumeration value
* not handled in switch " */
unsigned int npools ;
unsigned int * pool_to ; /* maps pool id to cpu or node */
unsigned int * to_pool ; /* maps cpu or node to pool id */
} svc_pool_map = {
2007-03-06 12:42:23 +03:00
. count = 0 ,
. mode = SVC_POOL_DEFAULT
2006-10-02 13:18:01 +04:00
} ;
2007-03-06 12:42:23 +03:00
static DEFINE_MUTEX ( svc_pool_map_mutex ) ; /* protects svc_pool_map.count only */
static int
param_set_pool_mode ( const char * val , struct kernel_param * kp )
{
int * ip = ( int * ) kp - > arg ;
struct svc_pool_map * m = & svc_pool_map ;
int err ;
mutex_lock ( & svc_pool_map_mutex ) ;
err = - EBUSY ;
if ( m - > count )
goto out ;
err = 0 ;
if ( ! strncmp ( val , " auto " , 4 ) )
* ip = SVC_POOL_AUTO ;
else if ( ! strncmp ( val , " global " , 6 ) )
* ip = SVC_POOL_GLOBAL ;
else if ( ! strncmp ( val , " percpu " , 6 ) )
* ip = SVC_POOL_PERCPU ;
else if ( ! strncmp ( val , " pernode " , 7 ) )
* ip = SVC_POOL_PERNODE ;
else
err = - EINVAL ;
out :
mutex_unlock ( & svc_pool_map_mutex ) ;
return err ;
}
static int
param_get_pool_mode ( char * buf , struct kernel_param * kp )
{
int * ip = ( int * ) kp - > arg ;
switch ( * ip )
{
case SVC_POOL_AUTO :
return strlcpy ( buf , " auto " , 20 ) ;
case SVC_POOL_GLOBAL :
return strlcpy ( buf , " global " , 20 ) ;
case SVC_POOL_PERCPU :
return strlcpy ( buf , " percpu " , 20 ) ;
case SVC_POOL_PERNODE :
return strlcpy ( buf , " pernode " , 20 ) ;
default :
return sprintf ( buf , " %d " , * ip ) ;
}
}
2006-10-02 13:18:01 +04:00
2007-03-06 12:42:23 +03:00
module_param_call ( pool_mode , param_set_pool_mode , param_get_pool_mode ,
& svc_pool_map . mode , 0644 ) ;
2006-10-02 13:18:01 +04:00
/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (num_online_nodes() > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = any_online_node(node_online_map);
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}
/*
* Allocate the to_pool [ ] and pool_to [ ] arrays .
* Returns 0 on success or an errno .
*/
static int
svc_pool_map_alloc_arrays ( struct svc_pool_map * m , unsigned int maxpools )
{
m - > to_pool = kcalloc ( maxpools , sizeof ( unsigned int ) , GFP_KERNEL ) ;
if ( ! m - > to_pool )
goto fail ;
m - > pool_to = kcalloc ( maxpools , sizeof ( unsigned int ) , GFP_KERNEL ) ;
if ( ! m - > pool_to )
goto fail_free ;
return 0 ;
fail_free :
kfree ( m - > to_pool ) ;
fail :
return - ENOMEM ;
}
/*
* Initialise the pool map for SVC_POOL_PERCPU mode .
* Returns number of pools or < 0 on error .
*/
static int
svc_pool_map_init_percpu ( struct svc_pool_map * m )
{
2007-02-21 00:57:51 +03:00
unsigned int maxpools = nr_cpu_ids ;
2006-10-02 13:18:01 +04:00
unsigned int pidx = 0 ;
unsigned int cpu ;
int err ;
err = svc_pool_map_alloc_arrays ( m , maxpools ) ;
if ( err )
return err ;
for_each_online_cpu ( cpu ) {
BUG_ON ( pidx > maxpools ) ;
m - > to_pool [ cpu ] = pidx ;
m - > pool_to [ pidx ] = cpu ;
pidx + + ;
}
/* cpus brought online later all get mapped to pool0, sorry */
return pidx ;
} ;
/*
* Initialise the pool map for SVC_POOL_PERNODE mode .
* Returns number of pools or < 0 on error .
*/
static int
svc_pool_map_init_pernode ( struct svc_pool_map * m )
{
2007-02-21 00:57:51 +03:00
unsigned int maxpools = nr_node_ids ;
2006-10-02 13:18:01 +04:00
unsigned int pidx = 0 ;
unsigned int node ;
int err ;
err = svc_pool_map_alloc_arrays ( m , maxpools ) ;
if ( err )
return err ;
for_each_node_with_cpus ( node ) {
/* some architectures (e.g. SN2) have cpuless nodes */
BUG_ON ( pidx > maxpools ) ;
m - > to_pool [ node ] = pidx ;
m - > pool_to [ pidx ] = node ;
pidx + + ;
}
/* nodes brought online later all get mapped to pool0, sorry */
return pidx ;
}
/*
2007-03-06 12:42:23 +03:00
* Add a reference to the global map of cpus to pools ( and
* vice versa ) . Initialise the map if we ' re the first user .
* Returns the number of pools .
2006-10-02 13:18:01 +04:00
*/
static unsigned int
2007-03-06 12:42:23 +03:00
svc_pool_map_get ( void )
2006-10-02 13:18:01 +04:00
{
struct svc_pool_map * m = & svc_pool_map ;
int npools = - 1 ;
2007-03-06 12:42:23 +03:00
mutex_lock ( & svc_pool_map_mutex ) ;
if ( m - > count + + ) {
mutex_unlock ( & svc_pool_map_mutex ) ;
2006-10-02 13:18:01 +04:00
return m - > npools ;
2007-03-06 12:42:23 +03:00
}
2006-10-02 13:18:01 +04:00
2007-03-06 12:42:23 +03:00
if ( m - > mode = = SVC_POOL_AUTO )
m - > mode = svc_pool_map_choose_mode ( ) ;
2006-10-02 13:18:01 +04:00
switch ( m - > mode ) {
case SVC_POOL_PERCPU :
npools = svc_pool_map_init_percpu ( m ) ;
break ;
case SVC_POOL_PERNODE :
npools = svc_pool_map_init_pernode ( m ) ;
break ;
}
if ( npools < 0 ) {
/* default, or memory allocation failure */
npools = 1 ;
m - > mode = SVC_POOL_GLOBAL ;
}
m - > npools = npools ;
2007-03-06 12:42:23 +03:00
mutex_unlock ( & svc_pool_map_mutex ) ;
2006-10-02 13:18:01 +04:00
return m - > npools ;
}
2007-03-06 12:42:23 +03:00
/*
* Drop a reference to the global map of cpus to pools .
* When the last reference is dropped , the map data is
* freed ; this allows the sysadmin to change the pool
* mode using the pool_mode module option without
* rebooting or re - loading sunrpc . ko .
*/
static void
svc_pool_map_put ( void )
{
struct svc_pool_map * m = & svc_pool_map ;
mutex_lock ( & svc_pool_map_mutex ) ;
if ( ! - - m - > count ) {
m - > mode = SVC_POOL_DEFAULT ;
kfree ( m - > to_pool ) ;
kfree ( m - > pool_to ) ;
m - > npools = 0 ;
}
mutex_unlock ( & svc_pool_map_mutex ) ;
}
2006-10-02 13:18:01 +04:00
/*
* Set the current thread ' s cpus_allowed mask so that it
* will only run on cpus in the given pool .
*
* Returns 1 and fills in oldmask iff a cpumask was applied .
*/
static inline int
svc_pool_map_set_cpumask ( unsigned int pidx , cpumask_t * oldmask )
{
struct svc_pool_map * m = & svc_pool_map ;
unsigned int node ; /* or cpu */
/*
* The caller checks for sv_nrpools > 1 , which
2007-03-06 12:42:23 +03:00
* implies that we ' ve been initialized .
2006-10-02 13:18:01 +04:00
*/
2007-03-06 12:42:23 +03:00
BUG_ON ( m - > count = = 0 ) ;
2006-10-02 13:18:01 +04:00
switch ( m - > mode )
{
default :
return 0 ;
case SVC_POOL_PERCPU :
node = m - > pool_to [ pidx ] ;
* oldmask = current - > cpus_allowed ;
set_cpus_allowed ( current , cpumask_of_cpu ( node ) ) ;
return 1 ;
case SVC_POOL_PERNODE :
node = m - > pool_to [ pidx ] ;
* oldmask = current - > cpus_allowed ;
set_cpus_allowed ( current , node_to_cpumask ( node ) ) ;
return 1 ;
}
}
/*
* Use the mapping mode to choose a pool for a given CPU .
* Used when enqueueing an incoming RPC . Always returns
* a non - NULL pool pointer .
*/
struct svc_pool *
svc_pool_for_cpu ( struct svc_serv * serv , int cpu )
{
struct svc_pool_map * m = & svc_pool_map ;
unsigned int pidx = 0 ;
/*
2007-03-06 12:42:23 +03:00
* An uninitialised map happens in a pure client when
2006-10-02 13:18:01 +04:00
* lockd is brought up , so silently treat it the
* same as SVC_POOL_GLOBAL .
*/
2007-03-06 12:42:23 +03:00
if ( svc_serv_is_pooled ( serv ) ) {
switch ( m - > mode ) {
case SVC_POOL_PERCPU :
pidx = m - > to_pool [ cpu ] ;
break ;
case SVC_POOL_PERNODE :
pidx = m - > to_pool [ cpu_to_node ( cpu ) ] ;
break ;
}
2006-10-02 13:18:01 +04:00
}
return & serv - > sv_pools [ pidx % serv - > sv_nrpools ] ;
}
2005-04-17 02:20:36 +04:00
/*
* Create an RPC service
*/
2006-10-02 13:17:59 +04:00
static struct svc_serv *
__svc_create ( struct svc_program * prog , unsigned int bufsize , int npools ,
2006-10-02 13:17:44 +04:00
void ( * shutdown ) ( struct svc_serv * serv ) )
2005-04-17 02:20:36 +04:00
{
struct svc_serv * serv ;
int vers ;
unsigned int xdrsize ;
2006-10-02 13:17:58 +04:00
unsigned int i ;
2005-04-17 02:20:36 +04:00
2006-07-22 01:51:30 +04:00
if ( ! ( serv = kzalloc ( sizeof ( * serv ) , GFP_KERNEL ) ) )
2005-04-17 02:20:36 +04:00
return NULL ;
2005-06-22 21:16:24 +04:00
serv - > sv_name = prog - > pg_name ;
2005-04-17 02:20:36 +04:00
serv - > sv_program = prog ;
serv - > sv_nrthreads = 1 ;
serv - > sv_stats = prog - > pg_stats ;
2006-10-06 11:44:05 +04:00
if ( bufsize > RPCSVC_MAXPAYLOAD )
bufsize = RPCSVC_MAXPAYLOAD ;
serv - > sv_max_payload = bufsize ? bufsize : 4096 ;
serv - > sv_max_mesg = roundup ( serv - > sv_max_payload + PAGE_SIZE , PAGE_SIZE ) ;
2006-10-02 13:17:44 +04:00
serv - > sv_shutdown = shutdown ;
2005-04-17 02:20:36 +04:00
xdrsize = 0 ;
2005-06-22 21:16:24 +04:00
while ( prog ) {
prog - > pg_lovers = prog - > pg_nvers - 1 ;
for ( vers = 0 ; vers < prog - > pg_nvers ; vers + + )
if ( prog - > pg_vers [ vers ] ) {
prog - > pg_hivers = vers ;
if ( prog - > pg_lovers > vers )
prog - > pg_lovers = vers ;
if ( prog - > pg_vers [ vers ] - > vs_xdrsize > xdrsize )
xdrsize = prog - > pg_vers [ vers ] - > vs_xdrsize ;
}
prog = prog - > pg_next ;
}
2005-04-17 02:20:36 +04:00
serv - > sv_xdrsize = xdrsize ;
INIT_LIST_HEAD ( & serv - > sv_tempsocks ) ;
INIT_LIST_HEAD ( & serv - > sv_permsocks ) ;
2006-10-02 13:17:54 +04:00
init_timer ( & serv - > sv_temptimer ) ;
2005-04-17 02:20:36 +04:00
spin_lock_init ( & serv - > sv_lock ) ;
2006-10-02 13:17:59 +04:00
serv - > sv_nrpools = npools ;
2006-10-02 13:17:58 +04:00
serv - > sv_pools =
2006-12-13 11:34:52 +03:00
kcalloc ( serv - > sv_nrpools , sizeof ( struct svc_pool ) ,
2006-10-02 13:17:58 +04:00
GFP_KERNEL ) ;
if ( ! serv - > sv_pools ) {
kfree ( serv ) ;
return NULL ;
}
for ( i = 0 ; i < serv - > sv_nrpools ; i + + ) {
struct svc_pool * pool = & serv - > sv_pools [ i ] ;
2007-01-31 20:14:08 +03:00
dprintk ( " svc: initialising pool %u for %s \n " ,
2006-10-02 13:17:58 +04:00
i , serv - > sv_name ) ;
pool - > sp_id = i ;
INIT_LIST_HEAD ( & pool - > sp_threads ) ;
INIT_LIST_HEAD ( & pool - > sp_sockets ) ;
2006-10-02 13:17:59 +04:00
INIT_LIST_HEAD ( & pool - > sp_all_threads ) ;
2006-10-02 13:17:58 +04:00
spin_lock_init ( & pool - > sp_lock ) ;
}
2005-04-17 02:20:36 +04:00
/* Remove any stale portmap registrations */
svc_register ( serv , 0 , 0 ) ;
return serv ;
}
/* Create a single-pool (legacy) RPC service. */
struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   void (*shutdown)(struct svc_serv *serv))
{
	return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}
/*
 * Create a pooled RPC service.  The number of pools is taken from the
 * global cpu/node map; 'func', 'sig' and 'mod' are recorded so that
 * svc_set_num_threads() can later spawn and kill server threads.
 */
struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  void (*shutdown)(struct svc_serv *serv),
		  svc_thread_fn func, int sig, struct module *mod)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, shutdown);
	if (serv != NULL) {
		serv->sv_function    = func;
		serv->sv_kill_signal = sig;
		serv->sv_module      = mod;
	}

	return serv;
}
2005-04-17 02:20:36 +04:00
/*
2006-10-02 13:17:58 +04:00
* Destroy an RPC service . Should be called with the BKL held
2005-04-17 02:20:36 +04:00
*/
void
svc_destroy ( struct svc_serv * serv )
{
struct svc_sock * svsk ;
2007-03-06 12:42:22 +03:00
struct svc_sock * tmp ;
2005-04-17 02:20:36 +04:00
2007-01-31 20:14:08 +03:00
dprintk ( " svc: svc_destroy(%s, %d) \n " ,
2005-04-17 02:20:36 +04:00
serv - > sv_program - > pg_name ,
serv - > sv_nrthreads ) ;
if ( serv - > sv_nrthreads ) {
if ( - - ( serv - > sv_nrthreads ) ! = 0 ) {
svc_sock_update_bufs ( serv ) ;
return ;
}
} else
printk ( " svc_destroy: no threads for serv=%p! \n " , serv ) ;
2006-10-02 13:17:54 +04:00
del_timer_sync ( & serv - > sv_temptimer ) ;
2007-03-06 12:42:22 +03:00
list_for_each_entry_safe ( svsk , tmp , & serv - > sv_tempsocks , sk_list )
svc_force_close_socket ( svsk ) ;
2006-10-02 13:17:44 +04:00
if ( serv - > sv_shutdown )
serv - > sv_shutdown ( serv ) ;
2007-03-06 12:42:22 +03:00
list_for_each_entry_safe ( svsk , tmp , & serv - > sv_permsocks , sk_list )
svc_force_close_socket ( svsk ) ;
BUG_ON ( ! list_empty ( & serv - > sv_permsocks ) ) ;
BUG_ON ( ! list_empty ( & serv - > sv_tempsocks ) ) ;
2007-02-10 02:38:13 +03:00
2005-04-17 02:20:36 +04:00
cache_clean_deferred ( serv ) ;
2007-03-06 12:42:23 +03:00
if ( svc_serv_is_pooled ( serv ) )
svc_pool_map_put ( ) ;
2005-04-17 02:20:36 +04:00
/* Unregister service with the portmapper */
svc_register ( serv , 0 , 0 ) ;
2006-10-02 13:17:58 +04:00
kfree ( serv - > sv_pools ) ;
2005-04-17 02:20:36 +04:00
kfree ( serv ) ;
}
/*
* Allocate an RPC server ' s buffer space .
* We allocate pages and place them in rq_argpages .
*/
static int
svc_init_buffer ( struct svc_rqst * rqstp , unsigned int size )
{
int pages ;
int arghi ;
2007-02-10 02:38:13 +03:00
2006-10-06 11:44:05 +04:00
pages = size / PAGE_SIZE + 1 ; /* extra page as we hold both request and reply.
* We assume one is at most one page
*/
2005-04-17 02:20:36 +04:00
arghi = 0 ;
2006-01-09 09:24:28 +03:00
BUG_ON ( pages > RPCSVC_MAXPAGES ) ;
2005-04-17 02:20:36 +04:00
while ( pages ) {
struct page * p = alloc_page ( GFP_KERNEL ) ;
if ( ! p )
break ;
2006-10-04 13:15:46 +04:00
rqstp - > rq_pages [ arghi + + ] = p ;
2005-04-17 02:20:36 +04:00
pages - - ;
}
return ! pages ;
}
/*
* Release an RPC server buffer
*/
static void
svc_release_buffer ( struct svc_rqst * rqstp )
{
2006-10-04 13:15:46 +04:00
int i ;
for ( i = 0 ; i < ARRAY_SIZE ( rqstp - > rq_pages ) ; i + + )
if ( rqstp - > rq_pages [ i ] )
put_page ( rqstp - > rq_pages [ i ] ) ;
2005-04-17 02:20:36 +04:00
}
/*
2006-10-02 13:17:58 +04:00
* Create a thread in the given pool . Caller must hold BKL .
2006-10-02 13:18:01 +04:00
* On a NUMA or SMP machine , with a multi - pool serv , the thread
* will be restricted to run on the cpus belonging to the pool .
2005-04-17 02:20:36 +04:00
*/
2006-10-02 13:17:58 +04:00
static int
__svc_create_thread ( svc_thread_fn func , struct svc_serv * serv ,
struct svc_pool * pool )
2005-04-17 02:20:36 +04:00
{
struct svc_rqst * rqstp ;
int error = - ENOMEM ;
2006-10-02 13:18:01 +04:00
int have_oldmask = 0 ;
cpumask_t oldmask ;
2005-04-17 02:20:36 +04:00
2006-07-22 01:51:30 +04:00
rqstp = kzalloc ( sizeof ( * rqstp ) , GFP_KERNEL ) ;
2005-04-17 02:20:36 +04:00
if ( ! rqstp )
goto out ;
init_waitqueue_head ( & rqstp - > rq_wait ) ;
2006-01-11 00:08:21 +03:00
if ( ! ( rqstp - > rq_argp = kmalloc ( serv - > sv_xdrsize , GFP_KERNEL ) )
| | ! ( rqstp - > rq_resp = kmalloc ( serv - > sv_xdrsize , GFP_KERNEL ) )
2006-10-06 11:44:05 +04:00
| | ! svc_init_buffer ( rqstp , serv - > sv_max_mesg ) )
2005-04-17 02:20:36 +04:00
goto out_thread ;
serv - > sv_nrthreads + + ;
2006-10-02 13:17:58 +04:00
spin_lock_bh ( & pool - > sp_lock ) ;
pool - > sp_nrthreads + + ;
2006-10-02 13:17:59 +04:00
list_add ( & rqstp - > rq_all , & pool - > sp_all_threads ) ;
2006-10-02 13:17:58 +04:00
spin_unlock_bh ( & pool - > sp_lock ) ;
2005-04-17 02:20:36 +04:00
rqstp - > rq_server = serv ;
2006-10-02 13:17:58 +04:00
rqstp - > rq_pool = pool ;
2006-10-02 13:18:01 +04:00
if ( serv - > sv_nrpools > 1 )
have_oldmask = svc_pool_map_set_cpumask ( pool - > sp_id , & oldmask ) ;
2005-04-17 02:20:36 +04:00
error = kernel_thread ( ( int ( * ) ( void * ) ) func , rqstp , 0 ) ;
2006-10-02 13:18:01 +04:00
if ( have_oldmask )
set_cpus_allowed ( current , oldmask ) ;
2005-04-17 02:20:36 +04:00
if ( error < 0 )
goto out_thread ;
svc_sock_update_bufs ( serv ) ;
error = 0 ;
out :
return error ;
out_thread :
svc_exit_thread ( rqstp ) ;
goto out ;
}
/*
 * Create a thread in the default (first) pool.  Caller must hold BKL.
 */
int
svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
{
	return __svc_create_thread(func, serv, &serv->sv_pools[0]);
}
2006-10-02 13:17:59 +04:00
/*
* Choose a pool in which to create a new thread , for svc_set_num_threads
*/
static inline struct svc_pool *
choose_pool ( struct svc_serv * serv , struct svc_pool * pool , unsigned int * state )
{
if ( pool ! = NULL )
return pool ;
2007-02-10 02:38:13 +03:00
return & serv - > sv_pools [ ( * state ) + + % serv - > sv_nrpools ] ;
2006-10-02 13:17:59 +04:00
}
/*
* Choose a thread to kill , for svc_set_num_threads
*/
static inline struct task_struct *
choose_victim ( struct svc_serv * serv , struct svc_pool * pool , unsigned int * state )
{
unsigned int i ;
struct task_struct * task = NULL ;
if ( pool ! = NULL ) {
spin_lock_bh ( & pool - > sp_lock ) ;
} else {
/* choose a pool in round-robin fashion */
2007-02-10 02:38:13 +03:00
for ( i = 0 ; i < serv - > sv_nrpools ; i + + ) {
pool = & serv - > sv_pools [ - - ( * state ) % serv - > sv_nrpools ] ;
2006-10-02 13:17:59 +04:00
spin_lock_bh ( & pool - > sp_lock ) ;
2007-02-10 02:38:13 +03:00
if ( ! list_empty ( & pool - > sp_all_threads ) )
goto found_pool ;
2006-10-02 13:17:59 +04:00
spin_unlock_bh ( & pool - > sp_lock ) ;
2007-02-10 02:38:13 +03:00
}
2006-10-02 13:17:59 +04:00
return NULL ;
}
found_pool :
if ( ! list_empty ( & pool - > sp_all_threads ) ) {
struct svc_rqst * rqstp ;
/*
* Remove from the pool - > sp_all_threads list
* so we don ' t try to kill it again .
*/
rqstp = list_entry ( pool - > sp_all_threads . next , struct svc_rqst , rq_all ) ;
list_del_init ( & rqstp - > rq_all ) ;
task = rqstp - > rq_task ;
2007-02-10 02:38:13 +03:00
}
2006-10-02 13:17:59 +04:00
spin_unlock_bh ( & pool - > sp_lock ) ;
return task ;
}
/*
* Create or destroy enough new threads to make the number
* of threads the given number . If ` pool ' is non - NULL , applies
* only to threads in that pool , otherwise round - robins between
* all pools . Must be called with a svc_get ( ) reference and
* the BKL held .
*
* Destroying threads relies on the service threads filling in
* rqstp - > rq_task , which only the nfs ones do . Assumes the serv
* has been created using svc_create_pooled ( ) .
*
* Based on code that used to be in nfsd_svc ( ) but tweaked
* to be pool - aware .
*/
int
svc_set_num_threads ( struct svc_serv * serv , struct svc_pool * pool , int nrservs )
{
struct task_struct * victim ;
int error = 0 ;
unsigned int state = serv - > sv_nrthreads - 1 ;
if ( pool = = NULL ) {
/* The -1 assumes caller has done a svc_get() */
nrservs - = ( serv - > sv_nrthreads - 1 ) ;
} else {
spin_lock_bh ( & pool - > sp_lock ) ;
nrservs - = pool - > sp_nrthreads ;
spin_unlock_bh ( & pool - > sp_lock ) ;
}
/* create new threads */
while ( nrservs > 0 ) {
nrservs - - ;
__module_get ( serv - > sv_module ) ;
error = __svc_create_thread ( serv - > sv_function , serv ,
choose_pool ( serv , pool , & state ) ) ;
if ( error < 0 ) {
module_put ( serv - > sv_module ) ;
break ;
}
}
/* destroy old threads */
while ( nrservs < 0 & &
( victim = choose_victim ( serv , pool , & state ) ) ! = NULL ) {
send_sig ( serv - > sv_kill_signal , victim , 1 ) ;
nrservs + + ;
}
return error ;
}
2006-10-02 13:17:58 +04:00
/*
* Called from a server thread as it ' s exiting . Caller must hold BKL .
2005-04-17 02:20:36 +04:00
*/
void
svc_exit_thread ( struct svc_rqst * rqstp )
{
struct svc_serv * serv = rqstp - > rq_server ;
2006-10-02 13:17:58 +04:00
struct svc_pool * pool = rqstp - > rq_pool ;
2005-04-17 02:20:36 +04:00
svc_release_buffer ( rqstp ) ;
2005-11-08 20:41:34 +03:00
kfree ( rqstp - > rq_resp ) ;
kfree ( rqstp - > rq_argp ) ;
kfree ( rqstp - > rq_auth_data ) ;
2006-10-02 13:17:58 +04:00
spin_lock_bh ( & pool - > sp_lock ) ;
pool - > sp_nrthreads - - ;
2006-10-02 13:17:59 +04:00
list_del ( & rqstp - > rq_all ) ;
2006-10-02 13:17:58 +04:00
spin_unlock_bh ( & pool - > sp_lock ) ;
2005-04-17 02:20:36 +04:00
kfree ( rqstp ) ;
/* Release the server */
if ( serv )
svc_destroy ( serv ) ;
}
/*
* Register an RPC service with the local portmapper .
2007-02-10 02:38:13 +03:00
* To unregister a service , call this routine with
2005-04-17 02:20:36 +04:00
* proto and port = = 0.
*/
int
svc_register ( struct svc_serv * serv , int proto , unsigned short port )
{
struct svc_program * progp ;
unsigned long flags ;
int i , error = 0 , dummy ;
if ( ! port )
clear_thread_flag ( TIF_SIGPENDING ) ;
2006-10-04 13:16:05 +04:00
for ( progp = serv - > sv_program ; progp ; progp = progp - > pg_next ) {
for ( i = 0 ; i < progp - > pg_nvers ; i + + ) {
if ( progp - > pg_vers [ i ] = = NULL )
continue ;
2007-01-31 20:14:08 +03:00
dprintk ( " svc: svc_register(%s, %s, %d, %d)%s \n " ,
2006-10-04 13:16:05 +04:00
progp - > pg_name ,
proto = = IPPROTO_UDP ? " udp " : " tcp " ,
port ,
i ,
progp - > pg_vers [ i ] - > vs_hidden ?
" (but not telling portmap) " : " " ) ;
if ( progp - > pg_vers [ i ] - > vs_hidden )
continue ;
2007-03-30 00:48:16 +04:00
error = rpcb_register ( progp - > pg_prog , i , proto , port , & dummy ) ;
2006-10-04 13:16:05 +04:00
if ( error < 0 )
break ;
if ( port & & ! dummy ) {
error = - EACCES ;
break ;
}
2005-04-17 02:20:36 +04:00
}
}
if ( ! port ) {
spin_lock_irqsave ( & current - > sighand - > siglock , flags ) ;
recalc_sigpending ( ) ;
spin_unlock_irqrestore ( & current - > sighand - > siglock , flags ) ;
}
return error ;
}
2007-08-25 19:09:27 +04:00
/*
* Printk the given error with the address of the client that caused it .
*/
static int
__attribute__ ( ( format ( printf , 2 , 3 ) ) )
svc_printk ( struct svc_rqst * rqstp , const char * fmt , . . . )
{
va_list args ;
int r ;
char buf [ RPC_MAX_ADDRBUFLEN ] ;
if ( ! net_ratelimit ( ) )
return 0 ;
printk ( KERN_WARNING " svc: %s: " ,
svc_print_addr ( rqstp , buf , sizeof ( buf ) ) ) ;
va_start ( args , fmt ) ;
r = vprintk ( fmt , args ) ;
va_end ( args ) ;
return r ;
}
2005-04-17 02:20:36 +04:00
/*
* Process the RPC request .
*/
int
2006-10-02 13:17:50 +04:00
svc_process ( struct svc_rqst * rqstp )
2005-04-17 02:20:36 +04:00
{
struct svc_program * progp ;
struct svc_version * versp = NULL ; /* compiler food */
struct svc_procedure * procp = NULL ;
struct kvec * argv = & rqstp - > rq_arg . head [ 0 ] ;
struct kvec * resv = & rqstp - > rq_res . head [ 0 ] ;
2006-10-02 13:17:50 +04:00
struct svc_serv * serv = rqstp - > rq_server ;
2005-04-17 02:20:36 +04:00
kxdrproc_t xdr ;
2006-09-27 09:29:38 +04:00
__be32 * statp ;
u32 dir , prog , vers , proc ;
__be32 auth_stat , rpc_stat ;
2005-04-17 02:20:36 +04:00
int auth_res ;
2006-10-04 13:16:08 +04:00
__be32 * reply_statp ;
2005-04-17 02:20:36 +04:00
rpc_stat = rpc_success ;
if ( argv - > iov_len < 6 * 4 )
goto err_short_len ;
/* setup response xdr_buf.
2007-02-10 02:38:13 +03:00
* Initially it has just one page
2005-04-17 02:20:36 +04:00
*/
2006-10-04 13:15:46 +04:00
rqstp - > rq_resused = 1 ;
2005-04-17 02:20:36 +04:00
resv - > iov_base = page_address ( rqstp - > rq_respages [ 0 ] ) ;
resv - > iov_len = 0 ;
2006-10-04 13:15:46 +04:00
rqstp - > rq_res . pages = rqstp - > rq_respages + 1 ;
2005-04-17 02:20:36 +04:00
rqstp - > rq_res . len = 0 ;
rqstp - > rq_res . page_base = 0 ;
rqstp - > rq_res . page_len = 0 ;
2005-06-22 21:16:19 +04:00
rqstp - > rq_res . buflen = PAGE_SIZE ;
2006-06-30 12:56:19 +04:00
rqstp - > rq_res . tail [ 0 ] . iov_base = NULL ;
2005-04-17 02:20:36 +04:00
rqstp - > rq_res . tail [ 0 ] . iov_len = 0 ;
2006-06-30 12:56:19 +04:00
/* Will be turned off only in gss privacy case: */
2007-06-12 23:22:14 +04:00
rqstp - > rq_splice_ok = 1 ;
2007-12-31 06:07:29 +03:00
/* Setup reply header */
rqstp - > rq_xprt - > xpt_ops - > xpo_prep_reply_hdr ( rqstp ) ;
2005-04-17 02:20:36 +04:00
rqstp - > rq_xid = svc_getu32 ( argv ) ;
svc_putu32 ( resv , rqstp - > rq_xid ) ;
2006-09-27 09:28:46 +04:00
dir = svc_getnl ( argv ) ;
vers = svc_getnl ( argv ) ;
2005-04-17 02:20:36 +04:00
/* First words of reply: */
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , 1 ) ; /* REPLY */
2005-04-17 02:20:36 +04:00
if ( dir ! = 0 ) /* direction != CALL */
goto err_bad_dir ;
if ( vers ! = 2 ) /* RPC version number */
goto err_bad_rpc ;
/* Save position in case we later decide to reject: */
2006-10-04 13:16:08 +04:00
reply_statp = resv - > iov_base + resv - > iov_len ;
2005-04-17 02:20:36 +04:00
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , 0 ) ; /* ACCEPT */
2005-04-17 02:20:36 +04:00
2006-09-27 09:28:46 +04:00
rqstp - > rq_prog = prog = svc_getnl ( argv ) ; /* program number */
rqstp - > rq_vers = vers = svc_getnl ( argv ) ; /* version number */
rqstp - > rq_proc = proc = svc_getnl ( argv ) ; /* procedure number */
2005-04-17 02:20:36 +04:00
progp = serv - > sv_program ;
2005-11-07 12:00:27 +03:00
for ( progp = serv - > sv_program ; progp ; progp = progp - > pg_next )
if ( prog = = progp - > pg_prog )
break ;
2005-04-17 02:20:36 +04:00
/*
* Decode auth data , and add verifier to reply buffer .
* We do this before anything else in order to get a decent
* auth verifier .
*/
auth_res = svc_authenticate ( rqstp , & auth_stat ) ;
/* Also give the program a chance to reject this call: */
2005-11-07 12:00:27 +03:00
if ( auth_res = = SVC_OK & & progp ) {
2005-04-17 02:20:36 +04:00
auth_stat = rpc_autherr_badcred ;
auth_res = progp - > pg_authenticate ( rqstp ) ;
}
switch ( auth_res ) {
case SVC_OK :
break ;
case SVC_GARBAGE :
rpc_stat = rpc_garbage_args ;
goto err_bad ;
case SVC_SYSERR :
rpc_stat = rpc_system_err ;
goto err_bad ;
case SVC_DENIED :
goto err_bad_auth ;
case SVC_DROP :
goto dropit ;
case SVC_COMPLETE :
goto sendit ;
}
2005-11-07 12:00:27 +03:00
2005-06-22 21:16:24 +04:00
if ( progp = = NULL )
2005-04-17 02:20:36 +04:00
goto err_bad_prog ;
if ( vers > = progp - > pg_nvers | |
! ( versp = progp - > pg_vers [ vers ] ) )
goto err_bad_vers ;
procp = versp - > vs_proc + proc ;
if ( proc > = versp - > vs_nproc | | ! procp - > pc_func )
goto err_bad_proc ;
rqstp - > rq_server = serv ;
rqstp - > rq_procinfo = procp ;
/* Syntactic check complete */
serv - > sv_stats - > rpccnt + + ;
/* Build the reply header. */
statp = resv - > iov_base + resv - > iov_len ;
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , RPC_SUCCESS ) ;
2005-04-17 02:20:36 +04:00
/* Bump per-procedure stats counter */
procp - > pc_count + + ;
/* Initialize storage for argp and resp */
memset ( rqstp - > rq_argp , 0 , procp - > pc_argsize ) ;
memset ( rqstp - > rq_resp , 0 , procp - > pc_ressize ) ;
2007-02-10 02:38:13 +03:00
/* un-reserve some of the out-queue now that we have a
2005-04-17 02:20:36 +04:00
* better idea of reply size
*/
if ( procp - > pc_xdrressize )
2007-05-09 13:34:50 +04:00
svc_reserve_auth ( rqstp , procp - > pc_xdrressize < < 2 ) ;
2005-04-17 02:20:36 +04:00
/* Call the function that processes the request. */
if ( ! versp - > vs_dispatch ) {
/* Decode arguments */
xdr = procp - > pc_decode ;
if ( xdr & & ! xdr ( rqstp , argv - > iov_base , rqstp - > rq_argp ) )
goto err_garbage ;
* statp = procp - > pc_func ( rqstp , rqstp - > rq_argp , rqstp - > rq_resp ) ;
/* Encode reply */
2006-10-17 11:10:18 +04:00
if ( * statp = = rpc_drop_reply ) {
if ( procp - > pc_release )
procp - > pc_release ( rqstp , NULL , rqstp - > rq_resp ) ;
goto dropit ;
}
2005-04-17 02:20:36 +04:00
if ( * statp = = rpc_success & & ( xdr = procp - > pc_encode )
& & ! xdr ( rqstp , resv - > iov_base + resv - > iov_len , rqstp - > rq_resp ) ) {
dprintk ( " svc: failed to encode reply \n " ) ;
/* serv->sv_stats->rpcsystemerr++; */
* statp = rpc_system_err ;
}
} else {
dprintk ( " svc: calling dispatcher \n " ) ;
if ( ! versp - > vs_dispatch ( rqstp , statp ) ) {
/* Release reply info */
if ( procp - > pc_release )
procp - > pc_release ( rqstp , NULL , rqstp - > rq_resp ) ;
goto dropit ;
}
}
/* Check RPC status result */
if ( * statp ! = rpc_success )
resv - > iov_len = ( ( void * ) statp ) - resv - > iov_base + 4 ;
/* Release reply info */
if ( procp - > pc_release )
procp - > pc_release ( rqstp , NULL , rqstp - > rq_resp ) ;
if ( procp - > pc_encode = = NULL )
goto dropit ;
sendit :
if ( svc_authorise ( rqstp ) )
goto dropit ;
return svc_send ( rqstp ) ;
dropit :
svc_authorise ( rqstp ) ; /* doesn't hurt to call this twice */
dprintk ( " svc: svc_process dropit \n " ) ;
svc_drop ( rqstp ) ;
return 0 ;
err_short_len :
2007-08-25 19:09:27 +04:00
svc_printk ( rqstp , " short len %Zd, dropping request \n " ,
argv - > iov_len ) ;
2007-01-30 00:19:52 +03:00
2005-04-17 02:20:36 +04:00
goto dropit ; /* drop request */
err_bad_dir :
2007-08-25 19:09:27 +04:00
svc_printk ( rqstp , " bad direction %d, dropping request \n " , dir ) ;
2007-01-30 00:19:52 +03:00
2005-04-17 02:20:36 +04:00
serv - > sv_stats - > rpcbadfmt + + ;
goto dropit ; /* drop request */
err_bad_rpc :
serv - > sv_stats - > rpcbadfmt + + ;
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , 1 ) ; /* REJECT */
svc_putnl ( resv , 0 ) ; /* RPC_MISMATCH */
svc_putnl ( resv , 2 ) ; /* Only RPCv2 supported */
svc_putnl ( resv , 2 ) ;
2005-04-17 02:20:36 +04:00
goto sendit ;
err_bad_auth :
dprintk ( " svc: authentication failed (%d) \n " , ntohl ( auth_stat ) ) ;
serv - > sv_stats - > rpcbadauth + + ;
/* Restore write pointer to location of accept status: */
2006-10-04 13:16:08 +04:00
xdr_ressize_check ( rqstp , reply_statp ) ;
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , 1 ) ; /* REJECT */
svc_putnl ( resv , 1 ) ; /* AUTH_ERROR */
svc_putnl ( resv , ntohl ( auth_stat ) ) ; /* status */
2005-04-17 02:20:36 +04:00
goto sendit ;
err_bad_prog :
2005-06-22 21:16:24 +04:00
dprintk ( " svc: unknown program %d \n " , prog ) ;
2005-04-17 02:20:36 +04:00
serv - > sv_stats - > rpcbadfmt + + ;
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , RPC_PROG_UNAVAIL ) ;
2005-04-17 02:20:36 +04:00
goto sendit ;
err_bad_vers :
2007-08-25 19:09:27 +04:00
svc_printk ( rqstp , " unknown version (%d for prog %d, %s) \n " ,
2007-01-30 00:19:52 +03:00
vers , prog , progp - > pg_name ) ;
2005-04-17 02:20:36 +04:00
serv - > sv_stats - > rpcbadfmt + + ;
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , RPC_PROG_MISMATCH ) ;
svc_putnl ( resv , progp - > pg_lovers ) ;
svc_putnl ( resv , progp - > pg_hivers ) ;
2005-04-17 02:20:36 +04:00
goto sendit ;
err_bad_proc :
2007-08-25 19:09:27 +04:00
svc_printk ( rqstp , " unknown procedure (%d) \n " , proc ) ;
2007-01-30 00:19:52 +03:00
2005-04-17 02:20:36 +04:00
serv - > sv_stats - > rpcbadfmt + + ;
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , RPC_PROC_UNAVAIL ) ;
2005-04-17 02:20:36 +04:00
goto sendit ;
err_garbage :
2007-08-25 19:09:27 +04:00
svc_printk ( rqstp , " failed to decode args \n " ) ;
2007-01-30 00:19:52 +03:00
2005-04-17 02:20:36 +04:00
rpc_stat = rpc_garbage_args ;
err_bad :
serv - > sv_stats - > rpcbadfmt + + ;
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , ntohl ( rpc_stat ) ) ;
2005-04-17 02:20:36 +04:00
goto sendit ;
}
2006-10-04 13:15:47 +04:00
/*
* Return ( transport - specific ) limit on the rpc payload .
*/
u32 svc_max_payload ( const struct svc_rqst * rqstp )
{
2007-12-31 06:07:21 +03:00
u32 max = rqstp - > rq_xprt - > xpt_class - > xcl_max_payload ;
2006-10-04 13:15:47 +04:00
2006-10-06 11:44:05 +04:00
if ( rqstp - > rq_server - > sv_max_payload < max )
max = rqstp - > rq_server - > sv_max_payload ;
2006-10-04 13:15:47 +04:00
return max ;
}
EXPORT_SYMBOL_GPL ( svc_max_payload ) ;