/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * per net namespace data structures for nfsd
 *
 * Copyright (C) 2012, Jeff Layton <jlayton@redhat.com>
 */

#ifndef __NFSD_NETNS_H__
#define __NFSD_NETNS_H__

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/percpu_counter.h>
#include <linux/siphash.h>

/* Hash tables for nfs4_clientid state */
#define CLIENT_HASH_BITS	4
#define CLIENT_HASH_SIZE	(1 << CLIENT_HASH_BITS)
#define CLIENT_HASH_MASK	(CLIENT_HASH_SIZE - 1)

#define SESSION_HASH_SIZE	512
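
/*
 * Illustrative sketch, not part of this header: lookups in the clientid
 * hash tables sized above typically mask the short-form clientid with
 * CLIENT_HASH_MASK, roughly
 *
 *	static unsigned int clientid_hashval(u32 clientid)
 *	{
 *		return clientid & CLIENT_HASH_MASK;
 *	}
 *
 * The helper name is assumed here for illustration; the real hashing
 * lives in the nfs4 state code, not in this header.
 */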

struct cld_net;
struct nfsd4_client_tracking_ops;

enum {
	/* cache misses due only to checksum comparison failures */
	NFSD_NET_PAYLOAD_MISSES,
	/* amount of memory (in bytes) currently consumed by the DRC */
	NFSD_NET_DRC_MEM_USAGE,
	NFSD_NET_COUNTERS_NUM
};
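
/*
 * Illustrative sketch, assumed usage: each nfsd_net carries one
 * percpu_counter per enum entry above, so accounting a DRC payload miss
 * would look roughly like
 *
 *	percpu_counter_inc(&nn->counter[NFSD_NET_PAYLOAD_MISSES]);
 *
 * percpu_counter_inc() is the standard <linux/percpu_counter.h> helper;
 * the actual call sites live in the reply cache code.
 */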

/*
 * Represents an nfsd "container". With respect to NFSv4 state tracking, the
 * fields of interest are the *_id_hashtbls and the *_name_tree. These track
 * the nfs4_client objects by either short-form or long-form clientid.
 *
 * Each nfsd_net runs an nfs4_laundromat workqueue job when necessary to clean
 * up expired clients and delegations within the container.
 */
struct nfsd_net {
	struct cld_net *cld_net;

	struct cache_detail *svc_expkey_cache;
	struct cache_detail *svc_export_cache;
	struct cache_detail *idtoname_cache;
	struct cache_detail *nametoid_cache;

	struct lock_manager nfsd4_manager;
	bool grace_ended;
	time64_t boot_time;

	struct dentry *nfsd_client_dir;

	/*
	 * reclaim_str_hashtbl[] holds known client info from previous
	 * reset/reboot, used in reboot/reset lease grace period processing.
	 *
	 * conf_id_hashtbl[] and conf_name_tree hold confirmed
	 * setclientid_confirmed info.
	 *
	 * unconf_id_hashtbl[] and unconf_name_tree hold unconfirmed
	 * setclientid info.
	 */
	struct list_head *reclaim_str_hashtbl;
	int reclaim_str_hashtbl_size;
	struct list_head *conf_id_hashtbl;
	struct rb_root conf_name_tree;
	struct list_head *unconf_id_hashtbl;
	struct rb_root unconf_name_tree;
	struct list_head *sessionid_hashtbl;

	/*
	 * client_lru holds client queue ordered by nfs4_client.cl_time
	 * for lease renewal.
	 *
	 * close_lru holds (open) stateowner queue ordered by
	 * nfs4_stateowner.so_time for last close replay.
	 *
	 * All of the above fields are protected by the client_mutex.
	 */
	struct list_head client_lru;
	struct list_head close_lru;
	struct list_head del_recall_lru;

	/* protected by blocked_locks_lock */
	struct list_head blocked_locks_lru;

	struct delayed_work laundromat_work;

	/* client_lock protects the client lru list and session hash table */
	spinlock_t client_lock;

	/* protects blocked_locks_lru */
	spinlock_t blocked_locks_lock;

	struct file *rec_file;
	bool in_grace;
	const struct nfsd4_client_tracking_ops *client_tracking_ops;

	time64_t nfsd4_lease;
	time64_t nfsd4_grace;
	bool somebody_reclaimed;

	bool track_reclaim_completes;
	atomic_t nr_reclaim_complete;

	bool nfsd_net_up;
	bool lockd_up;

	seqlock_t writeverf_lock;
	unsigned char writeverf[8];

	/*
	 * Max number of connections this nfsd container will allow. Defaults
	 * to '0', which means the limit is based on the number of threads.
	 */
	unsigned int max_connections;

	u32 clientid_base;
	u32 clientid_counter;
	u32 clverifier_counter;

	struct svc_serv *nfsd_serv;
	/* When a listening socket is added to nfsd, keep_active is set
	 * and this justifies a reference on nfsd_serv. This stops
	 * nfsd_serv from being freed. When the number of threads is
	 * set, keep_active is cleared and the reference is dropped. So
	 * when the last thread exits, the service will be destroyed.
	 */
	int keep_active;

	/*
	 * clientid and stateid data for construction of net unique COPY
	 * stateids.
	 */
	u32 s2s_cp_cl_id;
	struct idr s2s_cp_stateids;
	spinlock_t s2s_cp_lock;

	/*
	 * Version information
	 */
	bool *nfsd_versions;
	bool *nfsd4_minorversions;

	/*
	 * Duplicate reply cache
	 */
	struct nfsd_drc_bucket *drc_hashtbl;
	/* max number of entries allowed in the cache */
	unsigned int max_drc_entries;
	/* number of significant bits in the hash value */
	unsigned int maskbits;
	unsigned int drc_hashsize;

	/*
	 * Stats and other tracking of the duplicate reply cache.
	 * The longest_chain* fields are modified with only the per-bucket
	 * cache lock, which isn't really safe and should be fixed if we want
	 * these statistics to be completely accurate.
	 */
	/* total number of entries */
	atomic_t num_drc_entries;
	/* Per-netns stats counters */
	struct percpu_counter counter[NFSD_NET_COUNTERS_NUM];
	/* longest hash chain seen */
	unsigned int longest_chain;
	/* size of cache when we saw the longest hash chain */
	unsigned int longest_chain_cachesize;
	struct shrinker nfsd_reply_cache_shrinker;

	/* tracking server-to-server copy mounts */
	spinlock_t nfsd_ssc_lock;
	struct list_head nfsd_ssc_mount_list;
	wait_queue_head_t nfsd_ssc_waitq;

	/* utsname taken from the process that starts the server */
	char nfsd_name[UNX_MAXNODENAME + 1];

	struct nfsd_fcache_disposal *fcache_disposal;

	siphash_key_t siphash_key;
};

/* Simple check to find out if a given net was properly initialized */
#define nfsd_netns_ready(nn) ((nn)->sessionid_hashtbl)
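
/*
 * Illustrative sketch, assumed usage: code that can run before nfs4 state
 * has been set up for this net checks the macro first, e.g.
 *
 *	if (!nfsd_netns_ready(nn))
 *		return;
 */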

extern void nfsd_netns_free_versions(struct nfsd_net *nn);

extern unsigned int nfsd_net_id;
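
/*
 * Illustrative sketch: per-net nfsd data is reached through the generic
 * netns pointer array, keyed by nfsd_net_id, e.g.
 *
 *	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 *
 * net_generic() comes from <net/netns/generic.h>, included above.
 */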

void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn);
void nfsd_reset_write_verifier(struct nfsd_net *nn);
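
/*
 * Illustrative sketch, assumed call site: a WRITE reply copies the current
 * verifier under writeverf_lock via the helper above, e.g.
 *
 *	__be32 verf[2];
 *
 *	nfsd_copy_write_verifier(verf, nn);
 *
 * nfsd_reset_write_verifier() regenerates the verifier, typically after an
 * unexpected write error.
 */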

#endif /* __NFSD_NETNS_H__ */