Merge branch 'inet-fix-defrag-units-dismantle-races'
Eric Dumazet says: ==================== inet: fix defrag units dismantle races This series adds a new pre_exit() method to struct pernet_operations to solve a race in defrag units dismantle, without adding extra delays to netns dismantles. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
6c9bef32c6
@ -20,7 +20,7 @@ struct fqdir {
|
||||
|
||||
/* Keep atomic mem on separate cachelines in structs that include it */
|
||||
atomic_long_t mem ____cacheline_aligned_in_smp;
|
||||
struct rcu_work destroy_rwork;
|
||||
struct work_struct destroy_work;
|
||||
};
|
||||
|
||||
/**
|
||||
@ -113,6 +113,12 @@ int inet_frags_init(struct inet_frags *);
|
||||
void inet_frags_fini(struct inet_frags *);
|
||||
|
||||
int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net);
|
||||
|
||||
static void inline fqdir_pre_exit(struct fqdir *fqdir)
|
||||
{
|
||||
fqdir->high_thresh = 0; /* prevent creation of new frags */
|
||||
fqdir->dead = true;
|
||||
}
|
||||
void fqdir_exit(struct fqdir *fqdir);
|
||||
|
||||
void inet_frag_kill(struct inet_frag_queue *q);
|
||||
|
@ -67,6 +67,8 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
|
||||
struct sk_buff *head;
|
||||
|
||||
rcu_read_lock();
|
||||
if (fq->q.fqdir->dead)
|
||||
goto out_rcu_unlock;
|
||||
spin_lock(&fq->q.lock);
|
||||
|
||||
if (fq->q.flags & INET_FRAG_COMPLETE)
|
||||
|
@ -355,8 +355,13 @@ struct pernet_operations {
|
||||
* synchronize_rcu() related to these pernet_operations,
|
||||
* instead of separate synchronize_rcu() for every net.
|
||||
* Please, avoid synchronize_rcu() at all, where it's possible.
|
||||
*
|
||||
* Note that a combination of pre_exit() and exit() can
|
||||
* be used, since a synchronize_rcu() is guaranteed between
|
||||
* the calls.
|
||||
*/
|
||||
int (*init)(struct net *net);
|
||||
void (*pre_exit)(struct net *net);
|
||||
void (*exit)(struct net *net);
|
||||
void (*exit_batch)(struct list_head *net_exit_list);
|
||||
unsigned int *id;
|
||||
|
@ -145,6 +145,17 @@ static void ops_free(const struct pernet_operations *ops, struct net *net)
|
||||
}
|
||||
}
|
||||
|
||||
static void ops_pre_exit_list(const struct pernet_operations *ops,
|
||||
struct list_head *net_exit_list)
|
||||
{
|
||||
struct net *net;
|
||||
|
||||
if (ops->pre_exit) {
|
||||
list_for_each_entry(net, net_exit_list, exit_list)
|
||||
ops->pre_exit(net);
|
||||
}
|
||||
}
|
||||
|
||||
static void ops_exit_list(const struct pernet_operations *ops,
|
||||
struct list_head *net_exit_list)
|
||||
{
|
||||
@ -328,6 +339,12 @@ out_undo:
|
||||
* for the pernet modules whose init functions did not fail.
|
||||
*/
|
||||
list_add(&net->exit_list, &net_exit_list);
|
||||
saved_ops = ops;
|
||||
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
|
||||
ops_pre_exit_list(ops, &net_exit_list);
|
||||
|
||||
synchronize_rcu();
|
||||
|
||||
saved_ops = ops;
|
||||
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
|
||||
ops_exit_list(ops, &net_exit_list);
|
||||
@ -541,10 +558,15 @@ static void cleanup_net(struct work_struct *work)
|
||||
list_add_tail(&net->exit_list, &net_exit_list);
|
||||
}
|
||||
|
||||
/* Run all of the network namespace pre_exit methods */
|
||||
list_for_each_entry_reverse(ops, &pernet_list, list)
|
||||
ops_pre_exit_list(ops, &net_exit_list);
|
||||
|
||||
/*
|
||||
* Another CPU might be rcu-iterating the list, wait for it.
|
||||
* This needs to be before calling the exit() notifiers, so
|
||||
* the rcu_barrier() below isn't sufficient alone.
|
||||
* Also the pre_exit() and exit() methods need this barrier.
|
||||
*/
|
||||
synchronize_rcu();
|
||||
|
||||
@ -1101,6 +1123,8 @@ static int __register_pernet_operations(struct list_head *list,
|
||||
out_undo:
|
||||
/* If I have an error cleanup all namespaces I initialized */
|
||||
list_del(&ops->list);
|
||||
ops_pre_exit_list(ops, &net_exit_list);
|
||||
synchronize_rcu();
|
||||
ops_exit_list(ops, &net_exit_list);
|
||||
ops_free_list(ops, &net_exit_list);
|
||||
return error;
|
||||
@ -1115,6 +1139,8 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
|
||||
/* See comment in __register_pernet_operations() */
|
||||
for_each_net(net)
|
||||
list_add_tail(&net->exit_list, &net_exit_list);
|
||||
ops_pre_exit_list(ops, &net_exit_list);
|
||||
synchronize_rcu();
|
||||
ops_exit_list(ops, &net_exit_list);
|
||||
ops_free_list(ops, &net_exit_list);
|
||||
}
|
||||
@ -1139,6 +1165,8 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
|
||||
} else {
|
||||
LIST_HEAD(net_exit_list);
|
||||
list_add(&init_net.exit_list, &net_exit_list);
|
||||
ops_pre_exit_list(ops, &net_exit_list);
|
||||
synchronize_rcu();
|
||||
ops_exit_list(ops, &net_exit_list);
|
||||
ops_free_list(ops, &net_exit_list);
|
||||
}
|
||||
|
@ -459,6 +459,14 @@ static int __net_init lowpan_frags_init_net(struct net *net)
|
||||
return res;
|
||||
}
|
||||
|
||||
/* pernet pre_exit: flag the 802.15.4 6lowpan fqdir as dying, ahead of
 * the synchronize_rcu() that precedes lowpan_frags_exit_net().
 */
static void __net_exit lowpan_frags_pre_exit_net(struct net *net)
{
	fqdir_pre_exit(net_ieee802154_lowpan(net)->fqdir);
}
|
||||
|
||||
static void __net_exit lowpan_frags_exit_net(struct net *net)
|
||||
{
|
||||
struct netns_ieee802154_lowpan *ieee802154_lowpan =
|
||||
@ -469,8 +477,9 @@ static void __net_exit lowpan_frags_exit_net(struct net *net)
|
||||
}
|
||||
|
||||
static struct pernet_operations lowpan_frags_ops = {
|
||||
.init = lowpan_frags_init_net,
|
||||
.exit = lowpan_frags_exit_net,
|
||||
.init = lowpan_frags_init_net,
|
||||
.pre_exit = lowpan_frags_pre_exit_net,
|
||||
.exit = lowpan_frags_exit_net,
|
||||
};
|
||||
|
||||
static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed)
|
||||
|
@ -145,10 +145,9 @@ static void inet_frags_free_cb(void *ptr, void *arg)
|
||||
inet_frag_destroy(fq);
|
||||
}
|
||||
|
||||
static void fqdir_rwork_fn(struct work_struct *work)
|
||||
static void fqdir_work_fn(struct work_struct *work)
|
||||
{
|
||||
struct fqdir *fqdir = container_of(to_rcu_work(work),
|
||||
struct fqdir, destroy_rwork);
|
||||
struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);
|
||||
struct inet_frags *f = fqdir->f;
|
||||
|
||||
rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);
|
||||
@ -187,18 +186,8 @@ EXPORT_SYMBOL(fqdir_init);
|
||||
|
||||
void fqdir_exit(struct fqdir *fqdir)
|
||||
{
|
||||
fqdir->high_thresh = 0; /* prevent creation of new frags */
|
||||
|
||||
fqdir->dead = true;
|
||||
|
||||
/* call_rcu is supposed to provide memory barrier semantics,
|
||||
* separating the setting of fqdir->dead with the destruction
|
||||
* work. This implicit barrier is paired with inet_frag_kill().
|
||||
*/
|
||||
|
||||
INIT_RCU_WORK(&fqdir->destroy_rwork, fqdir_rwork_fn);
|
||||
queue_rcu_work(system_wq, &fqdir->destroy_rwork);
|
||||
|
||||
INIT_WORK(&fqdir->destroy_work, fqdir_work_fn);
|
||||
queue_work(system_wq, &fqdir->destroy_work);
|
||||
}
|
||||
EXPORT_SYMBOL(fqdir_exit);
|
||||
|
||||
|
@ -143,6 +143,10 @@ static void ip_expire(struct timer_list *t)
|
||||
net = qp->q.fqdir->net;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
if (qp->q.fqdir->dead)
|
||||
goto out_rcu_unlock;
|
||||
|
||||
spin_lock(&qp->q.lock);
|
||||
|
||||
if (qp->q.flags & INET_FRAG_COMPLETE)
|
||||
@ -676,6 +680,11 @@ static int __net_init ipv4_frags_init_net(struct net *net)
|
||||
return res;
|
||||
}
|
||||
|
||||
/* pernet pre_exit: mark the IPv4 defrag fqdir dead so ip_expire()
 * (which checks fqdir->dead under rcu_read_lock()) stops early.
 */
static void __net_exit ipv4_frags_pre_exit_net(struct net *net)
{
	struct fqdir *fqdir = net->ipv4.fqdir;

	fqdir_pre_exit(fqdir);
}
|
||||
|
||||
static void __net_exit ipv4_frags_exit_net(struct net *net)
|
||||
{
|
||||
ip4_frags_ns_ctl_unregister(net);
|
||||
@ -683,8 +692,9 @@ static void __net_exit ipv4_frags_exit_net(struct net *net)
|
||||
}
|
||||
|
||||
static struct pernet_operations ip4_frags_ops = {
|
||||
.init = ipv4_frags_init_net,
|
||||
.exit = ipv4_frags_exit_net,
|
||||
.init = ipv4_frags_init_net,
|
||||
.pre_exit = ipv4_frags_pre_exit_net,
|
||||
.exit = ipv4_frags_exit_net,
|
||||
};
|
||||
|
||||
|
||||
|
@ -499,6 +499,11 @@ static int nf_ct_net_init(struct net *net)
|
||||
return res;
|
||||
}
|
||||
|
||||
static void nf_ct_net_pre_exit(struct net *net)
|
||||
{
|
||||
fqdir_pre_exit(net->nf_frag.fqdir);
|
||||
}
|
||||
|
||||
static void nf_ct_net_exit(struct net *net)
|
||||
{
|
||||
nf_ct_frags6_sysctl_unregister(net);
|
||||
@ -506,8 +511,9 @@ static void nf_ct_net_exit(struct net *net)
|
||||
}
|
||||
|
||||
static struct pernet_operations nf_ct_net_ops = {
|
||||
.init = nf_ct_net_init,
|
||||
.exit = nf_ct_net_exit,
|
||||
.init = nf_ct_net_init,
|
||||
.pre_exit = nf_ct_net_pre_exit,
|
||||
.exit = nf_ct_net_exit,
|
||||
};
|
||||
|
||||
static const struct rhashtable_params nfct_rhash_params = {
|
||||
|
@ -520,6 +520,11 @@ static int __net_init ipv6_frags_init_net(struct net *net)
|
||||
return res;
|
||||
}
|
||||
|
||||
/* pernet pre_exit: mark the IPv6 defrag fqdir dead so expiry handlers
 * checking fqdir->dead under rcu_read_lock() bail out.
 */
static void __net_exit ipv6_frags_pre_exit_net(struct net *net)
{
	struct fqdir *fqdir = net->ipv6.fqdir;

	fqdir_pre_exit(fqdir);
}
|
||||
|
||||
static void __net_exit ipv6_frags_exit_net(struct net *net)
|
||||
{
|
||||
ip6_frags_ns_sysctl_unregister(net);
|
||||
@ -527,8 +532,9 @@ static void __net_exit ipv6_frags_exit_net(struct net *net)
|
||||
}
|
||||
|
||||
static struct pernet_operations ip6_frags_ops = {
|
||||
.init = ipv6_frags_init_net,
|
||||
.exit = ipv6_frags_exit_net,
|
||||
.init = ipv6_frags_init_net,
|
||||
.pre_exit = ipv6_frags_pre_exit_net,
|
||||
.exit = ipv6_frags_exit_net,
|
||||
};
|
||||
|
||||
static const struct rhashtable_params ip6_rhash_params = {
|
||||
|
Loading…
x
Reference in New Issue
Block a user