/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv);

#define svc_serv_is_pooled(serv)    ((serv)->sv_function)

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};
#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
} svc_pool_map = {
	.count = 0,
	.mode = SVC_POOL_DEFAULT
};
static DEFINE_MUTEX(svc_pool_map_mutex);	/* protects svc_pool_map.count only */

static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}

static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip)
	{
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode", 20);
	default:
		return sprintf(buf, "%d", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		 &svc_pool_map.mode, 0644);
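
/*
 * Usage sketch: because pool_mode is exposed read-write (0644) via
 * module_param_call(), the mapping mode can typically be chosen before
 * any pooled service is started, e.g.
 *
 *	modprobe sunrpc pool_mode=pernode
 *
 * or by writing "auto", "global", "percpu" or "pernode" to
 * /sys/module/sunrpc/parameters/pool_mode.  Once a pooled service holds
 * a reference (svc_pool_map.count != 0), param_set_pool_mode() above
 * rejects further changes with -EBUSY.
 */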

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (num_online_nodes() > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = any_online_node(node_online_map);
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}
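
/*
 * For example (derived from the heuristic above): a two-node NUMA box
 * resolves to SVC_POOL_PERNODE, a single-node machine with more than
 * two online cpus resolves to SVC_POOL_PERCPU, and a uniprocessor or
 * dual-cpu box falls back to the single global pool.
 */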

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx > maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
};

/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}

/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}

/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		m->mode = SVC_POOL_DEFAULT;
		kfree(m->to_pool);
		kfree(m->pool_to);
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}

/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	BUG_ON(m->count == 0);

	switch (m->mode) {
	case SVC_POOL_PERCPU:
	{
		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
		break;
	}
	case SVC_POOL_PERNODE:
	{
		node_to_cpumask_ptr(nodecpumask, node);
		set_cpus_allowed_ptr(task, nodecpumask);
		break;
	}
	}
}
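
/*
 * For instance (reading straight from the switch above): pinning a
 * thread to pool 2 in SVC_POOL_PERCPU mode restricts it to the single
 * cpu stored in pool_to[2], while SVC_POOL_PERNODE mode lets it run on
 * every cpu of the node stored there.
 */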

/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
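
/*
 * Worked example (follows directly from the switch above): in
 * SVC_POOL_PERCPU mode a request arriving on cpu 3 is queued to
 * serv->sv_pools[m->to_pool[3]], i.e. the pool dedicated to that cpu;
 * in SVC_POOL_PERNODE mode the same request lands in the pool of
 * cpu_to_node(3).  The final "% serv->sv_nrpools" keeps the index in
 * range even if the map and the serv disagree about the pool count.
 */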

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     sa_family_t family, void (*shutdown)(struct svc_serv *serv))
{
	struct svc_serv	*serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_family    = family;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize ? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_shutdown  = shutdown;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers - 1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	init_timer(&serv->sv_temptimer);
	spin_lock_init(&serv->sv_lock);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_threads);
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	/* Remove any stale portmap registrations */
	svc_unregister(serv);

	return serv;
}
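
/*
 * Sizing example (plain arithmetic on the assignments above, assuming
 * 4KB pages): a caller passing bufsize = 32768 gets sv_max_payload =
 * 32768 and sv_max_mesg = roundup(32768 + 4096, 4096) = 36864, i.e. one
 * extra page of headroom beyond the payload; bufsize = 0 falls back to
 * a 4096-byte payload.
 */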

struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   sa_family_t family, void (*shutdown)(struct svc_serv *serv))
{
	return __svc_create(prog, bufsize, /*npools*/ 1, family, shutdown);
}
EXPORT_SYMBOL_GPL(svc_create);
2006-10-02 13:17:59 +04:00
struct svc_serv *
svc_create_pooled ( struct svc_program * prog , unsigned int bufsize ,
2008-07-01 02:45:30 +04:00
sa_family_t family , void ( * shutdown ) ( struct svc_serv * serv ) ,
2008-06-10 16:40:39 +04:00
svc_thread_fn func , struct module * mod )
2006-10-02 13:17:59 +04:00
{
struct svc_serv * serv ;
2007-03-06 12:42:23 +03:00
unsigned int npools = svc_pool_map_get ( ) ;
2006-10-02 13:17:59 +04:00
2008-07-01 02:45:30 +04:00
serv = __svc_create ( prog , bufsize , npools , family , shutdown ) ;
2006-10-02 13:17:59 +04:00
if ( serv ! = NULL ) {
serv - > sv_function = func ;
serv - > sv_module = mod ;
}
return serv ;
}
2008-12-24 00:30:12 +03:00
EXPORT_SYMBOL_GPL ( svc_create_pooled ) ;
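
/*
 * Usage sketch (illustrative only; the real call sites live in the nfsd
 * and lockd modules): a pooled service is typically brought up with
 * something like
 *
 *	serv = svc_create_pooled(&my_program, bufsize, AF_INET,
 *				 my_shutdown, my_thread_fn, THIS_MODULE);
 *
 * where my_thread_fn is the function each kthread will run and
 * my_shutdown is invoked from svc_destroy() when the last reference to
 * the service goes away.  my_program, my_shutdown and my_thread_fn are
 * placeholder names, not symbols defined in this file.
 */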

/*
 * Destroy an RPC service.  Should be called with appropriate locking to
 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
				serv->sv_program->pg_name,
				serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	svc_close_all(&serv->sv_tempsocks);

	if (serv->sv_shutdown)
		serv->sv_shutdown(serv);

	svc_close_all(&serv->sv_permsocks);

	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

	svc_unregister(serv);
	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);

/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{
	unsigned int pages, arghi;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
				       * We assume each is at most one page
				       */
	arghi = 0;
	BUG_ON(pages > RPCSVC_MAXPAGES);
	while (pages) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}
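
/*
 * Arithmetic example (derived from the computation above, assuming 4KB
 * pages): svc_prepare_thread() calls this with serv->sv_max_mesg, so a
 * service with sv_max_mesg = 36864 allocates 36864 / 4096 + 1 = 10
 * pages per thread, well below RPCSVC_MAXPAGES.
 */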

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}

struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
{
	struct svc_rqst	*rqstp;

	rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
	if (!rqstp)
		goto out_enomem;

	init_waitqueue_head(&rqstp->rq_wait);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
	if (!rqstp->rq_argp)
		goto out_thread;

	rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
	if (!rqstp->rq_resp)
		goto out_thread;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg))
		goto out_thread;

	return rqstp;
out_thread:
	svc_exit_thread(rqstp);
out_enomem:
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(svc_prepare_thread);

/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		list_del_init(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}

/*
 * Create or destroy threads as needed to bring the number of threads
 * up or down to the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Must be called with a svc_get() reference and
 * the BKL or another lock to protect access to svc_serv fields.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst	*rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	int error = 0;
	unsigned int state = serv->sv_nrthreads - 1;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads - 1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		rqstp = svc_prepare_thread(serv, chosen_pool);
		if (IS_ERR(rqstp)) {
			error = PTR_ERR(rqstp);
			break;
		}

		__module_get(serv->sv_module);
		task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
		if (IS_ERR(task)) {
			error = PTR_ERR(task);
			module_put(serv->sv_module);
			svc_exit_thread(rqstp);
			break;
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (task = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(SIGINT, task, 1);
		nrservs++;
	}

	return error;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);
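
/*
 * Usage sketch (illustrative, not copied from a particular caller):
 * after svc_create_pooled() the owning module typically sizes the
 * service with
 *
 *	error = svc_set_num_threads(serv, NULL, nrservs);
 *
 * A NULL pool spreads new threads round-robin over all pools; calling
 * it again with a smaller nrservs (or 0) shrinks the service by sending
 * SIGINT to surplus threads, as in the destroy loop above.
 */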

/*
 * Called from a server thread as it's exiting.  Caller must hold the BKL or
 * the "service mutex", whichever is appropriate for the service.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	list_del(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	kfree(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);

#ifdef CONFIG_SUNRPC_REGISTER_V4

/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(const u32 program, const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	char *netid;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	return rpcb_v4_register(program, version,
				(struct sockaddr *)&sin, netid);
}

/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(const u32 program, const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	char *netid;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	return rpcb_v4_register(program, version,
				(struct sockaddr *)&sin6, netid);
}

/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error;

	switch (family) {
	case PF_INET:
		return __svc_rpcb_register4(program, version,
						protocol, port);
	case PF_INET6:
		error = __svc_rpcb_register6(program, version,
						protocol, port);
		if (error < 0)
			return error;

		/*
		 * Work around bug in some versions of Linux rpcbind
		 * which don't allow registration of both inet and
		 * inet6 netids.
		 *
		 * Error return ignored for now.
		 */
		__svc_rpcb_register4(program, version,
						protocol, port);
		return 0;
	}

	return -EAFNOSUPPORT;
}
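
/*
 * Example (illustrative; the program and version numbers are the
 * well-known NFS assignments, not values defined in this file):
 * registering an AF_INET6 NFSv3 service on UDP through the path above
 * leaves rpcbind holding two entries,
 *
 *	[100003, 3, <address and port>, "udp6"]
 *	[100003, 3, <address and port>, "udp"]
 *
 * because "udp" and "udp6" are distinct netids and each must be SET
 * explicitly; the return code of the second registration is
 * deliberately ignored.
 */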

#else				/* CONFIG_SUNRPC_REGISTER_V4 */

/*
 * Register a kernel RPC service via rpcbind version 2.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	if (family != PF_INET)
		return -EAFNOSUPPORT;

	return rpcb_register(program, version, protocol, port);
}
# endif /* CONFIG_SUNRPC_REGISTER_V4 */
2008-08-19 03:34:16 +04:00
/**
* svc_register - register an RPC service with the local portmapper
* @ serv : svc_serv struct for the service to register
2009-03-19 03:46:06 +03:00
* @ family : protocol family of service ' s listener socket
2008-08-19 03:34:16 +04:00
* @ proto : transport protocol number to advertise
* @ port : port to advertise
*
2009-03-19 03:46:06 +03:00
* Service is registered for any address in the passed - in protocol family
2008-08-19 03:34:16 +04:00
*/
2009-03-19 03:46:06 +03:00
int svc_register ( const struct svc_serv * serv , const int family ,
const unsigned short proto , const unsigned short port )
2005-04-17 02:20:36 +04:00
{
struct svc_program * progp ;
2007-10-26 21:32:56 +04:00
unsigned int i ;
2008-08-19 03:34:00 +04:00
int error = 0 ;
2005-04-17 02:20:36 +04:00
2008-08-19 03:34:08 +04:00
BUG_ON ( proto = = 0 & & port = = 0 ) ;
2005-04-17 02:20:36 +04:00
2006-10-04 13:16:05 +04:00
for ( progp = serv - > sv_program ; progp ; progp = progp - > pg_next ) {
for ( i = 0 ; i < progp - > pg_nvers ; i + + ) {
if ( progp - > pg_vers [ i ] = = NULL )
continue ;
2008-09-16 01:27:23 +04:00
dprintk ( " svc: svc_register(%sv%d, %s, %u, %u)%s \n " ,
2006-10-04 13:16:05 +04:00
progp - > pg_name ,
2008-09-16 01:27:23 +04:00
i ,
2006-10-04 13:16:05 +04:00
proto = = IPPROTO_UDP ? " udp " : " tcp " ,
port ,
2009-03-19 03:46:06 +03:00
family ,
2006-10-04 13:16:05 +04:00
progp - > pg_vers [ i ] - > vs_hidden ?
" (but not telling portmap) " : " " ) ;
if ( progp - > pg_vers [ i ] - > vs_hidden )
continue ;
2008-08-19 03:34:16 +04:00
error = __svc_register ( progp - > pg_prog , i ,
2009-03-19 03:46:06 +03:00
family , proto , port ) ;
2006-10-04 13:16:05 +04:00
if ( error < 0 )
break ;
2005-04-17 02:20:36 +04:00
}
}
2008-08-19 03:34:08 +04:00
return error ;
}
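A minimal usage sketch (assumed caller; the name and values are illustrative and
not from this file): a service setup path that has opened a PF_INET UDP listener
might advertise it with svc_register() like this.

/* Illustrative only: advertise every non-hidden program/version on "port". */
static int example_advertise_udp(struct svc_serv *serv, unsigned short port)
{
	int error;

	error = svc_register(serv, PF_INET, IPPROTO_UDP, port);
	if (error < 0)
		printk(KERN_WARNING "example: svc_register failed (%d)\n",
		       error);
	return error;
}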
2008-09-25 19:56:57 +04:00
# ifdef CONFIG_SUNRPC_REGISTER_V4
static void __svc_unregister ( const u32 program , const u32 version ,
const char * progname )
{
struct sockaddr_in6 sin6 = {
. sin6_family = AF_INET6 ,
. sin6_addr = IN6ADDR_ANY_INIT ,
. sin6_port = 0 ,
} ;
int error ;
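	/*
	 * The zero port and empty netid ask rpcbind to drop every netid
	 * and address registered for this [program, version], matching
	 * the behaviour documented for svc_unregister() below.
	 */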
error = rpcb_v4_register ( program , version ,
( struct sockaddr * ) & sin6 , " " ) ;
dprintk ( " svc: %s(%sv%u), error %d \n " ,
__func__ , progname , version , error ) ;
}
# else /* CONFIG_SUNRPC_REGISTER_V4 */
static void __svc_unregister ( const u32 program , const u32 version ,
const char * progname )
{
int error ;
error = rpcb_register ( program , version , 0 , 0 ) ;
dprintk ( " svc: %s(%sv%u), error %d \n " ,
__func__ , progname , version , error ) ;
}
# endif /* CONFIG_SUNRPC_REGISTER_V4 */
2008-08-19 03:34:08 +04:00
/*
2008-09-25 19:56:57 +04:00
* All netids , bind addresses and ports registered for [ program , version ]
* are removed from the local rpcbind database ( if the service is not
* hidden ) to make way for a new instance of the service .
2008-08-19 03:34:08 +04:00
*
2008-09-25 19:56:57 +04:00
* The result of unregistration is reported via dprintk for those who want
* verification of the result , but is otherwise not important .
2008-08-19 03:34:08 +04:00
*/
static void svc_unregister ( const struct svc_serv * serv )
{
struct svc_program * progp ;
unsigned long flags ;
unsigned int i ;
clear_thread_flag ( TIF_SIGPENDING ) ;
for ( progp = serv - > sv_program ; progp ; progp = progp - > pg_next ) {
for ( i = 0 ; i < progp - > pg_nvers ; i + + ) {
if ( progp - > pg_vers [ i ] = = NULL )
continue ;
if ( progp - > pg_vers [ i ] - > vs_hidden )
continue ;
2008-09-25 19:56:57 +04:00
__svc_unregister ( progp - > pg_prog , i , progp - > pg_name ) ;
2008-08-19 03:34:08 +04:00
}
2005-04-17 02:20:36 +04:00
}
2008-08-19 03:34:08 +04:00
spin_lock_irqsave ( & current - > sighand - > siglock , flags ) ;
recalc_sigpending ( ) ;
spin_unlock_irqrestore ( & current - > sighand - > siglock , flags ) ;
2005-04-17 02:20:36 +04:00
}
2007-08-25 19:09:27 +04:00
/*
* Printk the given error with the address of the client that caused it .
*/
static int
__attribute__ ( ( format ( printf , 2 , 3 ) ) )
svc_printk ( struct svc_rqst * rqstp , const char * fmt , . . . )
{
va_list args ;
int r ;
char buf [ RPC_MAX_ADDRBUFLEN ] ;
if ( ! net_ratelimit ( ) )
return 0 ;
printk ( KERN_WARNING " svc: %s: " ,
svc_print_addr ( rqstp , buf , sizeof ( buf ) ) ) ;
va_start ( args , fmt ) ;
r = vprintk ( fmt , args ) ;
va_end ( args ) ;
return r ;
}
2005-04-17 02:20:36 +04:00
/*
* Process the RPC request .
*/
int
2006-10-02 13:17:50 +04:00
svc_process ( struct svc_rqst * rqstp )
2005-04-17 02:20:36 +04:00
{
struct svc_program * progp ;
struct svc_version * versp = NULL ; /* compiler food */
struct svc_procedure * procp = NULL ;
struct kvec * argv = & rqstp - > rq_arg . head [ 0 ] ;
struct kvec * resv = & rqstp - > rq_res . head [ 0 ] ;
2006-10-02 13:17:50 +04:00
struct svc_serv * serv = rqstp - > rq_server ;
2005-04-17 02:20:36 +04:00
kxdrproc_t xdr ;
2006-09-27 09:29:38 +04:00
__be32 * statp ;
u32 dir , prog , vers , proc ;
__be32 auth_stat , rpc_stat ;
2005-04-17 02:20:36 +04:00
int auth_res ;
2006-10-04 13:16:08 +04:00
__be32 * reply_statp ;
2005-04-17 02:20:36 +04:00
rpc_stat = rpc_success ;
if ( argv - > iov_len < 6 * 4 )
goto err_short_len ;
/* setup response xdr_buf.
2007-02-10 02:38:13 +03:00
* Initially it has just one page
2005-04-17 02:20:36 +04:00
*/
2006-10-04 13:15:46 +04:00
rqstp - > rq_resused = 1 ;
2005-04-17 02:20:36 +04:00
resv - > iov_base = page_address ( rqstp - > rq_respages [ 0 ] ) ;
resv - > iov_len = 0 ;
2006-10-04 13:15:46 +04:00
rqstp - > rq_res . pages = rqstp - > rq_respages + 1 ;
2005-04-17 02:20:36 +04:00
rqstp - > rq_res . len = 0 ;
rqstp - > rq_res . page_base = 0 ;
rqstp - > rq_res . page_len = 0 ;
2005-06-22 21:16:19 +04:00
rqstp - > rq_res . buflen = PAGE_SIZE ;
2006-06-30 12:56:19 +04:00
rqstp - > rq_res . tail [ 0 ] . iov_base = NULL ;
2005-04-17 02:20:36 +04:00
rqstp - > rq_res . tail [ 0 ] . iov_len = 0 ;
2006-06-30 12:56:19 +04:00
/* Will be turned off only in gss privacy case: */
2007-06-12 23:22:14 +04:00
rqstp - > rq_splice_ok = 1 ;
2007-12-31 06:07:29 +03:00
/* Setup reply header */
rqstp - > rq_xprt - > xpt_ops - > xpo_prep_reply_hdr ( rqstp ) ;
2005-04-17 02:20:36 +04:00
rqstp - > rq_xid = svc_getu32 ( argv ) ;
svc_putu32 ( resv , rqstp - > rq_xid ) ;
2006-09-27 09:28:46 +04:00
dir = svc_getnl ( argv ) ;
vers = svc_getnl ( argv ) ;
2005-04-17 02:20:36 +04:00
/* First words of reply: */
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , 1 ) ; /* REPLY */
2005-04-17 02:20:36 +04:00
if ( dir ! = 0 ) /* direction != CALL */
goto err_bad_dir ;
if ( vers ! = 2 ) /* RPC version number */
goto err_bad_rpc ;
/* Save position in case we later decide to reject: */
2006-10-04 13:16:08 +04:00
reply_statp = resv - > iov_base + resv - > iov_len ;
2005-04-17 02:20:36 +04:00
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , 0 ) ; /* ACCEPT */
2005-04-17 02:20:36 +04:00
2006-09-27 09:28:46 +04:00
rqstp - > rq_prog = prog = svc_getnl ( argv ) ; /* program number */
rqstp - > rq_vers = vers = svc_getnl ( argv ) ; /* version number */
rqstp - > rq_proc = proc = svc_getnl ( argv ) ; /* procedure number */
2005-04-17 02:20:36 +04:00
progp = serv - > sv_program ;
2005-11-07 12:00:27 +03:00
for ( progp = serv - > sv_program ; progp ; progp = progp - > pg_next )
if ( prog = = progp - > pg_prog )
break ;
2005-04-17 02:20:36 +04:00
/*
* Decode auth data , and add verifier to reply buffer .
* We do this before anything else in order to get a decent
* auth verifier .
*/
auth_res = svc_authenticate ( rqstp , & auth_stat ) ;
/* Also give the program a chance to reject this call: */
2005-11-07 12:00:27 +03:00
if ( auth_res = = SVC_OK & & progp ) {
2005-04-17 02:20:36 +04:00
auth_stat = rpc_autherr_badcred ;
auth_res = progp - > pg_authenticate ( rqstp ) ;
}
switch ( auth_res ) {
case SVC_OK :
break ;
case SVC_GARBAGE :
2008-02-20 02:56:56 +03:00
goto err_garbage ;
2005-04-17 02:20:36 +04:00
case SVC_SYSERR :
rpc_stat = rpc_system_err ;
goto err_bad ;
case SVC_DENIED :
goto err_bad_auth ;
case SVC_DROP :
goto dropit ;
case SVC_COMPLETE :
goto sendit ;
}
2005-11-07 12:00:27 +03:00
2005-06-22 21:16:24 +04:00
if ( progp = = NULL )
2005-04-17 02:20:36 +04:00
goto err_bad_prog ;
if ( vers > = progp - > pg_nvers | |
! ( versp = progp - > pg_vers [ vers ] ) )
goto err_bad_vers ;
procp = versp - > vs_proc + proc ;
if ( proc > = versp - > vs_nproc | | ! procp - > pc_func )
goto err_bad_proc ;
rqstp - > rq_server = serv ;
rqstp - > rq_procinfo = procp ;
/* Syntactic check complete */
serv - > sv_stats - > rpccnt + + ;
/* Build the reply header. */
statp = resv - > iov_base + resv - > iov_len ;
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , RPC_SUCCESS ) ;
2005-04-17 02:20:36 +04:00
/* Bump per-procedure stats counter */
procp - > pc_count + + ;
/* Initialize storage for argp and resp */
memset ( rqstp - > rq_argp , 0 , procp - > pc_argsize ) ;
memset ( rqstp - > rq_resp , 0 , procp - > pc_ressize ) ;
2007-02-10 02:38:13 +03:00
/* un-reserve some of the out-queue now that we have a
2005-04-17 02:20:36 +04:00
* better idea of reply size
*/
if ( procp - > pc_xdrressize )
2007-05-09 13:34:50 +04:00
svc_reserve_auth ( rqstp , procp - > pc_xdrressize < < 2 ) ;
2005-04-17 02:20:36 +04:00
/* Call the function that processes the request. */
if ( ! versp - > vs_dispatch ) {
/* Decode arguments */
xdr = procp - > pc_decode ;
if ( xdr & & ! xdr ( rqstp , argv - > iov_base , rqstp - > rq_argp ) )
goto err_garbage ;
* statp = procp - > pc_func ( rqstp , rqstp - > rq_argp , rqstp - > rq_resp ) ;
/* Encode reply */
2006-10-17 11:10:18 +04:00
if ( * statp = = rpc_drop_reply ) {
if ( procp - > pc_release )
procp - > pc_release ( rqstp , NULL , rqstp - > rq_resp ) ;
goto dropit ;
}
2005-04-17 02:20:36 +04:00
if ( * statp = = rpc_success & & ( xdr = procp - > pc_encode )
& & ! xdr ( rqstp , resv - > iov_base + resv - > iov_len , rqstp - > rq_resp ) ) {
dprintk ( " svc: failed to encode reply \n " ) ;
/* serv->sv_stats->rpcsystemerr++; */
* statp = rpc_system_err ;
}
} else {
dprintk ( " svc: calling dispatcher \n " ) ;
if ( ! versp - > vs_dispatch ( rqstp , statp ) ) {
/* Release reply info */
if ( procp - > pc_release )
procp - > pc_release ( rqstp , NULL , rqstp - > rq_resp ) ;
goto dropit ;
}
}
/* Check RPC status result */
if ( * statp ! = rpc_success )
resv - > iov_len = ( ( void * ) statp ) - resv - > iov_base + 4 ;
/* Release reply info */
if ( procp - > pc_release )
procp - > pc_release ( rqstp , NULL , rqstp - > rq_resp ) ;
if ( procp - > pc_encode = = NULL )
goto dropit ;
sendit :
if ( svc_authorise ( rqstp ) )
goto dropit ;
return svc_send ( rqstp ) ;
dropit :
svc_authorise ( rqstp ) ; /* doesn't hurt to call this twice */
dprintk ( " svc: svc_process dropit \n " ) ;
svc_drop ( rqstp ) ;
return 0 ;
err_short_len :
2007-08-25 19:09:27 +04:00
svc_printk ( rqstp , " short len %Zd, dropping request \n " ,
argv - > iov_len ) ;
2007-01-30 00:19:52 +03:00
2005-04-17 02:20:36 +04:00
goto dropit ; /* drop request */
err_bad_dir :
2007-08-25 19:09:27 +04:00
svc_printk ( rqstp , " bad direction %d, dropping request \n " , dir ) ;
2007-01-30 00:19:52 +03:00
2005-04-17 02:20:36 +04:00
serv - > sv_stats - > rpcbadfmt + + ;
goto dropit ; /* drop request */
err_bad_rpc :
serv - > sv_stats - > rpcbadfmt + + ;
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , 1 ) ; /* REJECT */
svc_putnl ( resv , 0 ) ; /* RPC_MISMATCH */
svc_putnl ( resv , 2 ) ; /* Only RPCv2 supported */
svc_putnl ( resv , 2 ) ;
2005-04-17 02:20:36 +04:00
goto sendit ;
err_bad_auth :
dprintk ( " svc: authentication failed (%d) \n " , ntohl ( auth_stat ) ) ;
serv - > sv_stats - > rpcbadauth + + ;
/* Restore write pointer to location of accept status: */
2006-10-04 13:16:08 +04:00
xdr_ressize_check ( rqstp , reply_statp ) ;
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , 1 ) ; /* REJECT */
svc_putnl ( resv , 1 ) ; /* AUTH_ERROR */
svc_putnl ( resv , ntohl ( auth_stat ) ) ; /* status */
2005-04-17 02:20:36 +04:00
goto sendit ;
err_bad_prog :
2005-06-22 21:16:24 +04:00
dprintk ( " svc: unknown program %d \n " , prog ) ;
2005-04-17 02:20:36 +04:00
serv - > sv_stats - > rpcbadfmt + + ;
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , RPC_PROG_UNAVAIL ) ;
2005-04-17 02:20:36 +04:00
goto sendit ;
err_bad_vers :
2007-08-25 19:09:27 +04:00
svc_printk ( rqstp , " unknown version (%d for prog %d, %s) \n " ,
2007-01-30 00:19:52 +03:00
vers , prog , progp - > pg_name ) ;
2005-04-17 02:20:36 +04:00
serv - > sv_stats - > rpcbadfmt + + ;
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , RPC_PROG_MISMATCH ) ;
svc_putnl ( resv , progp - > pg_lovers ) ;
svc_putnl ( resv , progp - > pg_hivers ) ;
2005-04-17 02:20:36 +04:00
goto sendit ;
err_bad_proc :
2007-08-25 19:09:27 +04:00
svc_printk ( rqstp , " unknown procedure (%d) \n " , proc ) ;
2007-01-30 00:19:52 +03:00
2005-04-17 02:20:36 +04:00
serv - > sv_stats - > rpcbadfmt + + ;
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , RPC_PROC_UNAVAIL ) ;
2005-04-17 02:20:36 +04:00
goto sendit ;
err_garbage :
2007-08-25 19:09:27 +04:00
svc_printk ( rqstp , " failed to decode args \n " ) ;
2007-01-30 00:19:52 +03:00
2005-04-17 02:20:36 +04:00
rpc_stat = rpc_garbage_args ;
err_bad :
serv - > sv_stats - > rpcbadfmt + + ;
2006-09-27 09:28:46 +04:00
svc_putnl ( resv , ntohl ( rpc_stat ) ) ;
2005-04-17 02:20:36 +04:00
goto sendit ;
}
2008-12-24 00:30:12 +03:00
EXPORT_SYMBOL_GPL ( svc_process ) ;
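For reference, an informal sketch (comments only, not compiled): the fixed part
of the RPC call header that svc_process() walks with svc_getu32() and
svc_getnl() above, in the order defined by RFC 1831.

/*
 * xid        - copied unchanged into the reply
 * direction  - must be 0 (CALL), otherwise err_bad_dir
 * rpcvers    - must be 2, otherwise err_bad_rpc
 * prog       - RPC program number, matched against the sv_program list
 * vers       - program version, checked against pg_nvers/pg_vers[]
 * proc       - procedure number, checked against vs_nproc/pc_func
 * cred, verf - opaque authentication data, consumed by svc_authenticate()
 */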
2006-10-04 13:15:47 +04:00
/*
* Return ( transport - specific ) limit on the rpc payload .
*/
u32 svc_max_payload ( const struct svc_rqst * rqstp )
{
2007-12-31 06:07:21 +03:00
u32 max = rqstp - > rq_xprt - > xpt_class - > xcl_max_payload ;
2006-10-04 13:15:47 +04:00
2006-10-06 11:44:05 +04:00
if ( rqstp - > rq_server - > sv_max_payload < max )
max = rqstp - > rq_server - > sv_max_payload ;
2006-10-04 13:15:47 +04:00
return max ;
}
EXPORT_SYMBOL_GPL ( svc_max_payload ) ;
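A worked example of the clamp above (the numbers are made up): if a transport
class advertises xcl_max_payload of 1048576 bytes but the server was created
with sv_max_payload of 32768, svc_max_payload() returns 32768, the smaller of
the per-transport and per-service limits.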