inetpeer: add namespace support for inetpeer
Currently inetpeer has no namespace support, so peer information leaks across namespaces.

This patch moves the global variables v4_peers and v6_peers into netns_ipv4 and netns_ipv6 as a "peers" field, adds a struct pernet_operations (inetpeer_ops) to initialize the per-net inetpeer data, and changes family_to_base() and inet_getpeer() to take the namespace into account.

Signed-off-by: Gao feng <gaofeng@cn.fujitsu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit c8a627ed06
parent 1578e7778f
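The change relies on the kernel's standard per-net initialization pattern: state that used to be global is allocated from a pernet_operations .init hook for every namespace and released from .exit. The sketch below illustrates that pattern in isolation and is not part of this commit; because a module cannot add a field to struct net the way the patch adds netns_ipv4.peers and netns_ipv6.peers, the sketch stores its per-namespace state through net_generic(), and every example_* name is hypothetical.

/* Illustrative sketch only, not from the commit: per-namespace state
 * managed through pernet_operations.  The commit itself stores its pointer
 * directly in netns_ipv4/netns_ipv6 because inetpeer is built in.
 */
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/* hypothetical per-namespace state */
struct example_pernet_state {
        int peer_count;
};

static int example_net_id;

static int __net_init example_net_init(struct net *net)
{
        struct example_pernet_state *state = net_generic(net, example_net_id);

        /* memory was allocated by the pernet core because .size is set */
        state->peer_count = 0;
        return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
        /* nothing to free here; the .size area is released by the core */
}

static struct pernet_operations example_net_ops = {
        .init = example_net_init,
        .exit = example_net_exit,
        .id   = &example_net_id,
        .size = sizeof(struct example_pernet_state),
};

static int __init example_module_init(void)
{
        /* .init runs for every existing and future namespace */
        return register_pernet_subsys(&example_net_ops);
}

static void __exit example_module_exit(void)
{
        unregister_pernet_subsys(&example_net_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");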
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -75,7 +75,9 @@ static inline bool inet_metrics_new(const struct inet_peer *p)
 }
 
 /* can be called with or without local BH being disabled */
-struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create);
+struct inet_peer *inet_getpeer(struct net *net,
+                               const struct inetpeer_addr *daddr,
+                               int create);
 
 static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
 {
@@ -83,7 +85,7 @@ static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
 
         daddr.addr.a4 = v4daddr;
         daddr.family = AF_INET;
-        return inet_getpeer(&daddr, create);
+        return inet_getpeer(&init_net, &daddr, create);
 }
 
 static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr, int create)
@@ -92,14 +94,14 @@ static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr,
 
         *(struct in6_addr *)daddr.addr.a6 = *v6daddr;
         daddr.family = AF_INET6;
-        return inet_getpeer(&daddr, create);
+        return inet_getpeer(&init_net, &daddr, create);
 }
 
 /* can be called from BH context or outside */
 extern void inet_putpeer(struct inet_peer *p);
 extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
 
-extern void inetpeer_invalidate_tree(int family);
+extern void inetpeer_invalidate_tree(struct net *net, int family);
 
 /*
  * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -30,6 +30,7 @@ struct netns_ipv4 {
 
         struct sock             **icmp_sk;
         struct sock             *tcp_sock;
+        struct inet_peer_base   *peers;
         struct netns_frags      frags;
 #ifdef CONFIG_NETFILTER
         struct xt_table         *iptable_filter;
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -33,6 +33,7 @@ struct netns_ipv6 {
         struct netns_sysctl_ipv6 sysctl;
         struct ipv6_devconf     *devconf_all;
         struct ipv6_devconf     *devconf_dflt;
+        struct inet_peer_base   *peers;
         struct netns_frags      frags;
 #ifdef CONFIG_NETFILTER
         struct xt_table         *ip6table_filter;
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -88,18 +88,6 @@ struct inet_peer_base {
         int                     total;
 };
 
-static struct inet_peer_base v4_peers = {
-        .root           = peer_avl_empty_rcu,
-        .lock           = __SEQLOCK_UNLOCKED(v4_peers.lock),
-        .total          = 0,
-};
-
-static struct inet_peer_base v6_peers = {
-        .root           = peer_avl_empty_rcu,
-        .lock           = __SEQLOCK_UNLOCKED(v6_peers.lock),
-        .total          = 0,
-};
-
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 
 /* Exported for sysctl_net_ipv4. */
@@ -153,6 +141,46 @@ static void inetpeer_gc_worker(struct work_struct *work)
         schedule_delayed_work(&gc_work, gc_delay);
 }
 
+static int __net_init inetpeer_net_init(struct net *net)
+{
+        net->ipv4.peers = kzalloc(sizeof(struct inet_peer_base),
+                                  GFP_KERNEL);
+        if (net->ipv4.peers == NULL)
+                return -ENOMEM;
+
+        net->ipv4.peers->root = peer_avl_empty_rcu;
+        seqlock_init(&net->ipv4.peers->lock);
+
+        net->ipv6.peers = kzalloc(sizeof(struct inet_peer_base),
+                                  GFP_KERNEL);
+        if (net->ipv6.peers == NULL)
+                goto out_ipv6;
+
+        net->ipv6.peers->root = peer_avl_empty_rcu;
+        seqlock_init(&net->ipv6.peers->lock);
+
+        return 0;
+out_ipv6:
+        kfree(net->ipv4.peers);
+        return -ENOMEM;
+}
+
+static void __net_exit inetpeer_net_exit(struct net *net)
+{
+        inetpeer_invalidate_tree(net, AF_INET);
+        kfree(net->ipv4.peers);
+        net->ipv4.peers = NULL;
+
+        inetpeer_invalidate_tree(net, AF_INET6);
+        kfree(net->ipv6.peers);
+        net->ipv6.peers = NULL;
+}
+
+static struct pernet_operations inetpeer_ops = {
+        .init   = inetpeer_net_init,
+        .exit   = inetpeer_net_exit,
+};
+
 /* Called from ip_output.c:ip_init */
 void __init inet_initpeers(void)
 {
@@ -177,6 +205,7 @@ void __init inet_initpeers(void)
                         NULL);
 
         INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
+        register_pernet_subsys(&inetpeer_ops);
 }
 
 static int addr_compare(const struct inetpeer_addr *a,
@@ -401,9 +430,10 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
         call_rcu(&p->rcu, inetpeer_free_rcu);
 }
 
-static struct inet_peer_base *family_to_base(int family)
+static struct inet_peer_base *family_to_base(struct net *net,
+                                             int family)
 {
-        return family == AF_INET ? &v4_peers : &v6_peers;
+        return family == AF_INET ? net->ipv4.peers : net->ipv6.peers;
 }
 
 /* perform garbage collect on all items stacked during a lookup */
@@ -443,10 +473,12 @@ static int inet_peer_gc(struct inet_peer_base *base,
         return cnt;
 }
 
-struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
+struct inet_peer *inet_getpeer(struct net *net,
+                               const struct inetpeer_addr *daddr,
+                               int create)
 {
         struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
-        struct inet_peer_base *base = family_to_base(daddr->family);
+        struct inet_peer_base *base = family_to_base(net, daddr->family);
         struct inet_peer *p;
         unsigned int sequence;
         int invalidated, gccnt = 0;
@@ -571,10 +603,10 @@ static void inetpeer_inval_rcu(struct rcu_head *head)
         schedule_delayed_work(&gc_work, gc_delay);
 }
 
-void inetpeer_invalidate_tree(int family)
+void inetpeer_invalidate_tree(struct net *net, int family)
 {
         struct inet_peer *old, *new, *prev;
-        struct inet_peer_base *base = family_to_base(family);
+        struct inet_peer_base *base = family_to_base(net, family);
 
         write_seqlock_bh(&base->lock);
 
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -938,7 +938,7 @@ static void rt_cache_invalidate(struct net *net)
 
         get_random_bytes(&shuffle, sizeof(shuffle));
         atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
-        inetpeer_invalidate_tree(AF_INET);
+        inetpeer_invalidate_tree(net, AF_INET);
 }
 
 /*
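As a usage note, callers can now resolve peers in the namespace they are actually working in instead of the single global tree. The helper below is an illustrative sketch, not code from this commit; the skb/device-based namespace lookup and the example_* name are assumptions about a typical calling context.

#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <net/inetpeer.h>

/* Illustrative only: find (or create) the inet_peer entry for a packet's
 * IPv4 source address in the namespace of the device that received it.
 * The caller is expected to drop the reference with inet_putpeer().
 */
static struct inet_peer *example_peer_for_skb(struct sk_buff *skb)
{
        struct inetpeer_addr daddr;

        daddr.family = AF_INET;
        daddr.addr.a4 = ip_hdr(skb)->saddr;

        /* dev_net() selects the per-namespace peer tree added by this patch */
        return inet_getpeer(dev_net(skb->dev), &daddr, 1);
}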