/*
 * Central processing for nfsd.
 *
 * Authors:	Olaf Kirch (okir@monad.swb.de)
 *
 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/fs_struct.h>
#include <linux/swap.h>

#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include "nfsd.h"
#include "cache.h"
#include "vfs.h"
#include "netns.h"

#define NFSDDBG_FACILITY	NFSDDBG_SVC

extern struct svc_program	nfsd_program;
static int			nfsd(void *vrqstp);

/*
 * nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and the members
 * of the svc_serv struct.  In particular, ->sv_nrthreads but also to some
 * extent ->sv_temp_socks and ->sv_permsocks.  It also protects nfsdstats.th_cnt.
 *
 * If (outside the lock) nn->nfsd_serv is non-NULL, then it must point to a
 * properly initialised 'struct svc_serv' with ->sv_nrthreads > 0.  That number
 * of nfsd threads must exist and each must be listed in ->sp_all_threads in each
 * entry of ->sv_pools[].
 *
 * Transitions of the thread count between zero and non-zero are of particular
 * interest since the svc_serv needs to be created and initialized at that
 * point, or freed.
 *
 * Finally, the nfsd_mutex also protects some of the global variables that are
 * accessed when nfsd starts and that are settable via the write_* routines in
 * nfsctl.c.  In particular:
 *
 *	user_recovery_dirname
 *	user_lease_time
 *	nfsd_versions
 */
DEFINE_MUTEX(nfsd_mutex);

/*
 * nfsd_drc_lock protects nfsd_drc_max_pages and nfsd_drc_pages_used.
 * nfsd_drc_max_pages limits the total amount of memory available for
 * version 4.1 DRC caches.
 * nfsd_drc_pages_used tracks the current version 4.1 DRC memory usage.
 */
spinlock_t	nfsd_drc_lock;
unsigned long	nfsd_drc_max_mem;
unsigned long	nfsd_drc_mem_used;

#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static struct svc_stat	nfsd_acl_svcstats;
static struct svc_version *nfsd_acl_version[] = {
	[2] = &nfsd_acl_version2,
	[3] = &nfsd_acl_version3,
};

#define NFSD_ACL_MINVERS	2
#define NFSD_ACL_NRVERS		ARRAY_SIZE(nfsd_acl_version)
static struct svc_version *nfsd_acl_versions[NFSD_ACL_NRVERS];

static struct svc_program	nfsd_acl_program = {
	.pg_prog		= NFS_ACL_PROGRAM,
	.pg_nvers		= NFSD_ACL_NRVERS,
	.pg_vers		= nfsd_acl_versions,
	.pg_name		= "nfsacl",
	.pg_class		= "nfsd",
	.pg_stats		= &nfsd_acl_svcstats,
	.pg_authenticate	= &svc_set_client,
};

static struct svc_stat	nfsd_acl_svcstats = {
	.program	= &nfsd_acl_program,
};
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */

static struct svc_version *nfsd_version[] = {
	[2] = &nfsd_version2,
#if defined(CONFIG_NFSD_V3)
	[3] = &nfsd_version3,
#endif
#if defined(CONFIG_NFSD_V4)
	[4] = &nfsd_version4,
#endif
};

#define NFSD_MINVERS		2
#define NFSD_NRVERS		ARRAY_SIZE(nfsd_version)
static struct svc_version *nfsd_versions[NFSD_NRVERS];

struct svc_program		nfsd_program = {
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
	.pg_next		= &nfsd_acl_program,
#endif
	.pg_prog		= NFS_PROGRAM,		/* program number */
	.pg_nvers		= NFSD_NRVERS,		/* nr of entries in nfsd_version */
	.pg_vers		= nfsd_versions,	/* version table */
	.pg_name		= "nfsd",		/* program name */
	.pg_class		= "nfsd",		/* authentication class */
	.pg_stats		= &nfsd_svcstats,	/* server statistics */
	.pg_authenticate	= &svc_set_client,	/* export authentication */
};

static bool nfsd_supported_minorversions[NFSD_SUPPORTED_MINOR_VERSION + 1] = {
	[0] = 1,
	[1] = 1,
};

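/*
 * Enable, disable or query support for a major NFS version; the matching
 * ACL version (if built in) is toggled alongside versions 2 and 3.
 * Typically called from the nfsctl write handlers under nfsd_mutex.
 */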
int nfsd_vers(int vers, enum vers_op change)
{
	if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
		return 0;
	switch (change) {
	case NFSD_SET:
		nfsd_versions[vers] = nfsd_version[vers];
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
		if (vers < NFSD_ACL_NRVERS)
			nfsd_acl_versions[vers] = nfsd_acl_version[vers];
#endif
		break;
	case NFSD_CLEAR:
		nfsd_versions[vers] = NULL;
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
		if (vers < NFSD_ACL_NRVERS)
			nfsd_acl_versions[vers] = NULL;
#endif
		break;
	case NFSD_TEST:
		return nfsd_versions[vers] != NULL;
	case NFSD_AVAIL:
		return nfsd_version[vers] != NULL;
	}
	return 0;
}

int nfsd_minorversion(u32 minorversion, enum vers_op change)
{
	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION)
		return -1;
	switch (change) {
	case NFSD_SET:
		nfsd_supported_minorversions[minorversion] = true;
		break;
	case NFSD_CLEAR:
		nfsd_supported_minorversions[minorversion] = false;
		break;
	case NFSD_TEST:
		return nfsd_supported_minorversions[minorversion];
	case NFSD_AVAIL:
		return minorversion <= NFSD_SUPPORTED_MINOR_VERSION;
	}
	return 0;
}

/*
 * Maximum number of nfsd processes
 */
#define	NFSD_MAXSERVS		8192

int nfsd_nrthreads(struct net *net)
{
	int rv = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	mutex_lock(&nfsd_mutex);
	if (nn->nfsd_serv)
		rv = nn->nfsd_serv->sv_nrthreads;
	mutex_unlock(&nfsd_mutex);
	return rv;
}

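/*
 * Create the default listeners for a namespace: one UDP and one TCP
 * transport on NFS_PORT.  This is a no-op if listeners have already been
 * configured (e.g. through the nfsctl "portlist" interface), in which case
 * nn->nfsd_serv->sv_permsocks is non-empty.
 */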
static int nfsd_init_socks(struct net *net)
{
	int error;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!list_empty(&nn->nfsd_serv->sv_permsocks))
		return 0;

	error = svc_create_xprt(nn->nfsd_serv, "udp", net, PF_INET, NFS_PORT,
					SVC_SOCK_DEFAULTS);
	if (error < 0)
		return error;

	error = svc_create_xprt(nn->nfsd_serv, "tcp", net, PF_INET, NFS_PORT,
					SVC_SOCK_DEFAULTS);
	if (error < 0)
		return error;

	return 0;
}

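/*
 * nfsd_users counts the network namespaces that have brought nfsd up;
 * the non-per-net state (readahead cache, generic NFSv4 state) is set up
 * on the first start and torn down on the last shutdown.  Serialised by
 * nfsd_mutex.
 */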
static int nfsd_users = 0;

static int nfsd_startup_generic(int nrservs)
{
	int ret;

	if (nfsd_users++)
		return 0;

	/*
	 * Readahead param cache - will no-op if it already exists.
	 * (Note therefore results will be suboptimal if number of
	 * threads is modified after nfsd start.)
	 */
	ret = nfsd_racache_init(2*nrservs);
	if (ret)
		return ret;
	ret = nfs4_state_start();
	if (ret)
		goto out_racache;
	return 0;

out_racache:
	nfsd_racache_shutdown();
	return ret;
}

static void nfsd_shutdown_generic(void)
{
	if (--nfsd_users)
		return;

	nfs4_state_shutdown();
	nfsd_racache_shutdown();
}

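/*
 * lockd is only needed for NFSv2/v3 clients; NFSv4 carries locking in the
 * protocol itself, so a v4-only server can skip starting lockd.
 */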
static bool nfsd_needs_lockd(void)
{
#if defined(CONFIG_NFSD_V3)
	return (nfsd_versions[2] != NULL) || (nfsd_versions[3] != NULL);
#else
	return (nfsd_versions[2] != NULL);
#endif
}

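/*
 * Bring nfsd up for one network namespace: global state first, then the
 * default sockets, lockd (if v2/v3 is enabled) and the per-net NFSv4
 * state.  On error, whatever has already been started is torn down again.
 */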
static int nfsd_startup_net(int nrservs, struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	if (nn->nfsd_net_up)
		return 0;

	ret = nfsd_startup_generic(nrservs);
	if (ret)
		return ret;
	ret = nfsd_init_socks(net);
	if (ret)
		goto out_socks;

	if (nfsd_needs_lockd() && !nn->lockd_up) {
		ret = lockd_up(net);
		if (ret)
			goto out_socks;
		nn->lockd_up = 1;
	}

	ret = nfs4_state_start_net(net);
	if (ret)
		goto out_lockd;

	nn->nfsd_net_up = true;
	return 0;

out_lockd:
	if (nn->lockd_up) {
		lockd_down(net);
		nn->lockd_up = 0;
	}
out_socks:
	nfsd_shutdown_generic();
	return ret;
}

static void nfsd_shutdown_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	nfs4_state_shutdown_net(net);
	if (nn->lockd_up) {
		lockd_down(net);
		nn->lockd_up = 0;
	}
	nn->nfsd_net_up = false;
	nfsd_shutdown_generic();
}

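/*
 * Passed to svc_create_pooled() in nfsd_create_serv() as the serv's
 * shutdown callback: it runs when the last nfsd thread in a namespace
 * goes away, or when the server is destroyed before any thread started.
 */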
static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	/*
	 * write_ports can create the server without actually starting
	 * any threads--if we get shut down before any threads are
	 * started, then nfsd_last_thread will be run before any of this
	 * other initialization has been done.
	 */
	if (!nn->nfsd_net_up)
		return;
	nfsd_shutdown_net(net);

	svc_rpcb_cleanup(serv, net);

	printk(KERN_WARNING "nfsd: last server has exited, flushing export "
			    "cache\n");
	nfsd_export_flush(net);
}

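/*
 * If every NFS version has been cleared (e.g. by writes to the nfsctl
 * "versions" file), fall back to enabling everything that was built in
 * before the server is (re)created.
 */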
void nfsd_reset_versions(void)
{
	int found_one = 0;
	int i;

	for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
		if (nfsd_program.pg_vers[i])
			found_one = 1;
	}

	if (!found_one) {
		for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++)
			nfsd_program.pg_vers[i] = nfsd_version[i];
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
		for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++)
			nfsd_acl_program.pg_vers[i] =
				nfsd_acl_version[i];
#endif
	}
}

/*
 * Each session guarantees a negotiated per slot memory cache for replies
 * which in turn consumes memory beyond the v2/v3/v4.0 server. A dedicated
 * NFSv4.1 server might want to use more memory for a DRC than a machine
 * with multiple services.
 *
 * Impose a hard limit on the number of pages for the DRC which varies
 * according to the machine's free pages. This is of course only a default.
 *
 * For now this is a #defined shift which could be under admin control
 * in the future.
 */
static void set_max_drc(void)
{
	#define NFSD_DRC_SIZE_SHIFT	10
	nfsd_drc_max_mem = (nr_free_buffer_pages()
					>> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
	nfsd_drc_mem_used = 0;
	spin_lock_init(&nfsd_drc_lock);
	dprintk("%s nfsd_drc_max_mem %lu\n", __func__, nfsd_drc_max_mem);
}

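/*
 * Pick a default maximum RPC payload size based on low memory.  For
 * example, with 4GB of low memory the 1/4096 rule below yields a 1MB
 * target, which matches the usual NFSSVC_MAXBLKSIZE, while a 128MB
 * machine ends up with 32KB after the halving loop.
 */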
static int nfsd_get_default_max_blksize(void)
{
	struct sysinfo i;
	unsigned long long target;
	unsigned long ret;

	si_meminfo(&i);
	target = (i.totalram - i.totalhigh) << PAGE_SHIFT;
	/*
	 * Aim for 1/4096 of memory per thread.  This gives 1MB on 4Gig
	 * machines, but only uses 32K on 128M machines.  Bottom out at
	 * 8K on 32M and smaller.  Of course, this is only a default.
	 */
	target >>= 12;

	ret = NFSSVC_MAXBLKSIZE;
	while (ret > target && ret >= 8*1024*2)
		ret /= 2;
	return ret;
}

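/*
 * Create the svc_serv for this namespace, or take another reference to it
 * if it already exists.  Caller must hold nfsd_mutex.
 */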
int nfsd_create_serv(struct net *net)
{
	int error;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	WARN_ON(!mutex_is_locked(&nfsd_mutex));
	if (nn->nfsd_serv) {
		svc_get(nn->nfsd_serv);
		return 0;
	}
	if (nfsd_max_blksize == 0)
		nfsd_max_blksize = nfsd_get_default_max_blksize();
	nfsd_reset_versions();
	nn->nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
					  nfsd_last_thread, nfsd, THIS_MODULE);
	if (nn->nfsd_serv == NULL)
		return -ENOMEM;

	error = svc_bind(nn->nfsd_serv, net);
	if (error < 0) {
		svc_destroy(nn->nfsd_serv);
		return error;
	}

	set_max_drc();
	do_gettimeofday(&nn->nfssvc_boot);	/* record boot time */
	return 0;
}

int nfsd_nrpools(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (nn->nfsd_serv == NULL)
		return 0;
	else
		return nn->nfsd_serv->sv_nrpools;
}

int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
{
	int i = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (nn->nfsd_serv != NULL) {
		for (i = 0; i < nn->nfsd_serv->sv_nrpools && i < n; i++)
			nthreads[i] = nn->nfsd_serv->sv_pools[i].sp_nrthreads;
	}

	return 0;
}

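/*
 * Drop one reference to the namespace's svc_serv.  If this is the last
 * reference (sv_nrthreads == 1), shut the transports down first and
 * forget the server pointer.
 */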
void nfsd_destroy(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int destroy = (nn->nfsd_serv->sv_nrthreads == 1);

	if (destroy)
		svc_shutdown_net(nn->nfsd_serv, net);
	svc_destroy(nn->nfsd_serv);
	if (destroy)
		nn->nfsd_serv = NULL;
}

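/*
 * Set the thread count for each pool from the "pool_threads" interface.
 * Requests are clamped and scaled so the total never exceeds
 * NFSD_MAXSERVS, and pool 0 always keeps at least one thread.
 */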
int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
{
	int i = 0;
	int tot = 0;
	int err = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	WARN_ON(!mutex_is_locked(&nfsd_mutex));

	if (nn->nfsd_serv == NULL || n <= 0)
		return 0;

	if (n > nn->nfsd_serv->sv_nrpools)
		n = nn->nfsd_serv->sv_nrpools;

	/* enforce a global maximum number of threads */
	tot = 0;
	for (i = 0; i < n; i++) {
		if (nthreads[i] > NFSD_MAXSERVS)
			nthreads[i] = NFSD_MAXSERVS;
		tot += nthreads[i];
	}
	if (tot > NFSD_MAXSERVS) {
		/* total too large: scale down requested numbers */
		for (i = 0; i < n && tot > 0; i++) {
			int new = nthreads[i] * NFSD_MAXSERVS / tot;
			tot -= (nthreads[i] - new);
			nthreads[i] = new;
		}
		for (i = 0; i < n && tot > 0; i++) {
			nthreads[i]--;
			tot--;
		}
	}

	/*
	 * There must always be a thread in pool 0; the admin
	 * can't shut down NFS completely using pool_threads.
	 */
	if (nthreads[0] == 0)
		nthreads[0] = 1;

	/* apply the new numbers */
	svc_get(nn->nfsd_serv);
	for (i = 0; i < n; i++) {
		err = svc_set_num_threads(nn->nfsd_serv,
					  &nn->nfsd_serv->sv_pools[i],
					  nthreads[i]);
		if (err)
			break;
	}
	nfsd_destroy(net);
	return err;
}

/*
 * Adjust the number of threads and return the new number of threads.
 * This is also the function that starts the server if necessary, if
 * this is the first time nrservs is nonzero.
 */
int
nfsd_svc(int nrservs, struct net *net)
{
	int	error;
	bool	nfsd_up_before;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	mutex_lock(&nfsd_mutex);
	dprintk("nfsd: creating service\n");
	if (nrservs <= 0)
		nrservs = 0;
	if (nrservs > NFSD_MAXSERVS)
		nrservs = NFSD_MAXSERVS;
	error = 0;
	if (nrservs == 0 && nn->nfsd_serv == NULL)
		goto out;

	error = nfsd_create_serv(net);
	if (error)
		goto out;

	nfsd_up_before = nn->nfsd_net_up;

	error = nfsd_startup_net(nrservs, net);
	if (error)
		goto out_destroy;
	error = svc_set_num_threads(nn->nfsd_serv, NULL, nrservs);
	if (error)
		goto out_shutdown;
	/* We are holding a reference to nn->nfsd_serv which
	 * we don't want to count in the return value,
	 * so subtract 1
	 */
	error = nn->nfsd_serv->sv_nrthreads - 1;
out_shutdown:
	if (error < 0 && !nfsd_up_before)
		nfsd_shutdown_net(net);
out_destroy:
	nfsd_destroy(net);		/* Release server */
out:
	mutex_unlock(&nfsd_mutex);
	return error;
}

/*
 * This is the NFS server kernel thread
 */
static int
nfsd(void *vrqstp)
{
	struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
	struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next, typeof(struct svc_xprt), xpt_list);
	struct net *net = perm_sock->xpt_net;
	int err;

	/* Lock module and set up kernel thread */
	mutex_lock(&nfsd_mutex);

	/* At this point, the thread shares current->fs
	 * with the init process. We need to create files with a
	 * umask of 0 instead of init's umask. */
	if (unshare_fs_struct() < 0) {
		printk("Unable to start nfsd thread: out of memory\n");
		goto out;
	}

	current->fs->umask = 0;

	/*
	 * thread is spawned with all signals set to SIG_IGN, re-enable
	 * the ones that will bring down the thread
	 */
	allow_signal(SIGKILL);
	allow_signal(SIGHUP);
	allow_signal(SIGINT);
	allow_signal(SIGQUIT);

	nfsdstats.th_cnt++;
	mutex_unlock(&nfsd_mutex);

	/*
	 * We want less throttling in balance_dirty_pages() so that nfs to
	 * localhost doesn't cause nfsd to lock up due to all the client's
	 * dirty pages.
	 */
	current->flags |= PF_LESS_THROTTLE;
	set_freezable();

	/*
	 * The main request loop
	 */
	for (;;) {
		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN)
			;
		if (err == -EINTR)
			break;
		validate_process_creds();
		svc_process(rqstp);
		validate_process_creds();
	}

	/* Clear signals before calling svc_exit_thread() */
	flush_signals(current);

	mutex_lock(&nfsd_mutex);
	nfsdstats.th_cnt--;
out:
	rqstp->rq_server = NULL;

	/* Release the thread */
	svc_exit_thread(rqstp);

	nfsd_destroy(net);

	/* Release module */
	mutex_unlock(&nfsd_mutex);
	module_put_and_exit(0);
	return 0;
}

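/*
 * Older protocol versions cannot express some newer errors: NFSv2 has no
 * equivalent of nfserr_jukebox, so the request is dropped instead, and
 * nfserr_wrongsec is v4-only, so pre-v4 clients see nfserr_acces.
 */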
static __be32 map_new_errors(u32 vers, __be32 nfserr)
{
	if (nfserr == nfserr_jukebox && vers == 2)
		return nfserr_dropit;
	if (nfserr == nfserr_wrongsec && vers < 4)
		return nfserr_acces;
	return nfserr;
}

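/*
 * Dispatch one NFS request: decode the arguments, consult the duplicate
 * reply cache, call the procedure handler, encode the status and result,
 * and finally cache the reply.  Returns 0 to drop the request, 1 to send
 * a reply.
 */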
int
nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
	struct svc_procedure	*proc;
	kxdrproc_t		xdr;
	__be32			nfserr;
	__be32			*nfserrp;

	dprintk("nfsd_dispatch: vers %d proc %d\n",
				rqstp->rq_vers, rqstp->rq_proc);
	proc = rqstp->rq_procinfo;

	/*
	 * Give the xdr decoder a chance to change this if it wants
	 * (necessary in the NFSv4.0 compound case)
	 */
	rqstp->rq_cachetype = proc->pc_cachetype;
	/* Decode arguments */
	xdr = proc->pc_decode;
	if (xdr && !xdr(rqstp, (__be32 *)rqstp->rq_arg.head[0].iov_base,
			rqstp->rq_argp)) {
		dprintk("nfsd: failed to decode arguments!\n");
		*statp = rpc_garbage_args;
		return 1;
	}

	/* Check whether we have this call in the cache. */
	switch (nfsd_cache_lookup(rqstp)) {
	case RC_DROPIT:
		return 0;
	case RC_REPLY:
		return 1;
	case RC_DOIT:;
		/* do it */
	}

	/* need to grab the location to store the status, as
	 * nfsv4 does some encoding while processing
	 */
	nfserrp = rqstp->rq_res.head[0].iov_base
		+ rqstp->rq_res.head[0].iov_len;
	rqstp->rq_res.head[0].iov_len += sizeof(__be32);

	/* Now call the procedure handler, and encode NFS status. */
	nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
	nfserr = map_new_errors(rqstp->rq_vers, nfserr);
	if (nfserr == nfserr_dropit || rqstp->rq_dropme) {
		dprintk("nfsd: Dropping request; may be revisited later\n");
		nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
		return 0;
	}

	if (rqstp->rq_proc != 0)
		*nfserrp++ = nfserr;

	/* Encode result.
	 * For NFSv2, additional info is never returned in case of an error.
	 */
	if (!(nfserr && rqstp->rq_vers == 2)) {
		xdr = proc->pc_encode;
		if (xdr && !xdr(rqstp, nfserrp,
				rqstp->rq_resp)) {
			/* Failed to encode result. Release cache entry */
			dprintk("nfsd: failed to encode result!\n");
			nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
			*statp = rpc_system_err;
			return 1;
		}
	}

	/* Store reply in cache. */
	nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp + 1);
	return 1;
}

int nfsd_pool_stats_open(struct inode *inode, struct file *file)
{
	int ret;
	struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);

	mutex_lock(&nfsd_mutex);
	if (nn->nfsd_serv == NULL) {
		mutex_unlock(&nfsd_mutex);
		return -ENODEV;
	}
	/* bump up the pseudo refcount while traversing */
	svc_get(nn->nfsd_serv);
	ret = svc_pool_stats_open(nn->nfsd_serv, file);
	mutex_unlock(&nfsd_mutex);
	return ret;
}

int nfsd_pool_stats_release(struct inode *inode, struct file *file)
{
	int ret = seq_release(inode, file);
	struct net *net = inode->i_sb->s_fs_info;

	mutex_lock(&nfsd_mutex);
	/* this function really, really should have been called svc_put() */
	nfsd_destroy(net);
	mutex_unlock(&nfsd_mutex);
	return ret;
}