From 2ed3bf188b33630cf9d93b996ebf001847a00b5a Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 11 Apr 2022 13:01:16 +0200 Subject: [PATCH 01/17] netfilter: ecache: use dedicated list for event redelivery This disentangles event redelivery and the percpu dying list. Because entries are now stored on a dedicated list, all entries are in NFCT_ECACHE_DESTROY_FAIL state and all entries still have confirmed bit set -- the reference count is at least 1. The 'struct net' back-pointer can be removed as well. The pcpu dying list will be removed eventually, it has no functionality. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack.h | 3 +- include/net/netfilter/nf_conntrack_ecache.h | 2 - net/netfilter/nf_conntrack_core.c | 33 +++++- net/netfilter/nf_conntrack_ecache.c | 115 +++++++++----------- 4 files changed, 81 insertions(+), 72 deletions(-) diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 69e6c6a218be..28672a944499 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -45,7 +45,8 @@ union nf_conntrack_expect_proto { struct nf_conntrack_net_ecache { struct delayed_work dwork; - struct netns_ct *ct_net; + spinlock_t dying_lock; + struct hlist_nulls_head dying_list; }; struct nf_conntrack_net { diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index 6c4c490a3e34..a6135b5030dd 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -14,7 +14,6 @@ #include enum nf_ct_ecache_state { - NFCT_ECACHE_UNKNOWN, /* destroy event not sent */ NFCT_ECACHE_DESTROY_FAIL, /* tried but failed to send destroy event */ NFCT_ECACHE_DESTROY_SENT, /* sent destroy event after failure */ }; @@ -23,7 +22,6 @@ struct nf_conntrack_ecache { unsigned long cache; /* bitops want long */ u16 ctmask; /* bitmask of ct events to be delivered */ u16 expmask; /* bitmask of expect events to be delivered */ - enum nf_ct_ecache_state state:8;/* ecache state */ u32 missed; /* missed events */ u32 portid; /* netlink portid of destroyer */ }; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 0164e5f522e8..ca1d1d105163 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -660,15 +660,12 @@ void nf_ct_destroy(struct nf_conntrack *nfct) } EXPORT_SYMBOL(nf_ct_destroy); -static void nf_ct_delete_from_lists(struct nf_conn *ct) +static void __nf_ct_delete_from_lists(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); unsigned int hash, reply_hash; unsigned int sequence; - nf_ct_helper_destroy(ct); - - local_bh_disable(); do { sequence = read_seqcount_begin(&nf_conntrack_generation); hash = hash_conntrack(net, @@ -681,12 +678,33 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct) clean_from_lists(ct); nf_conntrack_double_unlock(hash, reply_hash); +} +static void nf_ct_delete_from_lists(struct nf_conn *ct) +{ + nf_ct_helper_destroy(ct); + local_bh_disable(); + + __nf_ct_delete_from_lists(ct); nf_ct_add_to_dying_list(ct); local_bh_enable(); } +static void nf_ct_add_to_ecache_list(struct nf_conn *ct) +{ +#ifdef CONFIG_NF_CONNTRACK_EVENTS + struct nf_conntrack_net *cnet = nf_ct_pernet(nf_ct_net(ct)); + + spin_lock(&cnet->ecache.dying_lock); + hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, + &cnet->ecache.dying_list); + spin_unlock(&cnet->ecache.dying_lock); +#else + 
nf_ct_add_to_dying_list(ct); +#endif +} + bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report) { struct nf_conn_tstamp *tstamp; @@ -709,7 +727,12 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report) /* destroy event was not delivered. nf_ct_put will * be done by event cache worker on redelivery. */ - nf_ct_delete_from_lists(ct); + nf_ct_helper_destroy(ct); + local_bh_disable(); + __nf_ct_delete_from_lists(ct); + nf_ct_add_to_ecache_list(ct); + local_bh_enable(); + nf_conntrack_ecache_work(nf_ct_net(ct), NFCT_ECACHE_DESTROY_FAIL); return false; } diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index 0cb2da0a759a..2752859479b2 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -29,8 +28,9 @@ static DEFINE_MUTEX(nf_ct_ecache_mutex); -#define ECACHE_RETRY_WAIT (HZ/10) -#define ECACHE_STACK_ALLOC (256 / sizeof(void *)) +#define DYING_NULLS_VAL ((1 << 30) + 1) +#define ECACHE_MAX_JIFFIES msecs_to_jiffies(10) +#define ECACHE_RETRY_JIFFIES msecs_to_jiffies(10) enum retry_state { STATE_CONGESTED, @@ -38,58 +38,58 @@ enum retry_state { STATE_DONE, }; -static enum retry_state ecache_work_evict_list(struct ct_pcpu *pcpu) +static enum retry_state ecache_work_evict_list(struct nf_conntrack_net *cnet) { - struct nf_conn *refs[ECACHE_STACK_ALLOC]; + unsigned long stop = jiffies + ECACHE_MAX_JIFFIES; + struct hlist_nulls_head evicted_list; enum retry_state ret = STATE_DONE; struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; - unsigned int evicted = 0; + unsigned int sent; - spin_lock(&pcpu->lock); + INIT_HLIST_NULLS_HEAD(&evicted_list, DYING_NULLS_VAL); - hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) { +next: + sent = 0; + spin_lock_bh(&cnet->ecache.dying_lock); + + hlist_nulls_for_each_entry_safe(h, n, &cnet->ecache.dying_list, hnnode) { struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); - struct nf_conntrack_ecache *e; - if (!nf_ct_is_confirmed(ct)) - continue; - - /* This ecache access is safe because the ct is on the - * pcpu dying list and we hold the spinlock -- the entry - * cannot be free'd until after the lock is released. - * - * This is true even if ct has a refcount of 0: the - * cpu that is about to free the entry must remove it - * from the dying list and needs the lock to do so. - */ - e = nf_ct_ecache_find(ct); - if (!e || e->state != NFCT_ECACHE_DESTROY_FAIL) - continue; - - /* ct is in NFCT_ECACHE_DESTROY_FAIL state, this means - * the worker owns this entry: the ct will remain valid - * until the worker puts its ct reference. + /* The worker owns all entries, ct remains valid until nf_ct_put + * in the loop below. 
*/ if (nf_conntrack_event(IPCT_DESTROY, ct)) { ret = STATE_CONGESTED; break; } - e->state = NFCT_ECACHE_DESTROY_SENT; - refs[evicted] = ct; + hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); + hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode, &evicted_list); - if (++evicted >= ARRAY_SIZE(refs)) { + if (time_after(stop, jiffies)) { ret = STATE_RESTART; break; } + + if (sent++ > 16) { + spin_unlock_bh(&cnet->ecache.dying_lock); + cond_resched(); + goto next; + } } - spin_unlock(&pcpu->lock); + spin_unlock_bh(&cnet->ecache.dying_lock); - /* can't _put while holding lock */ - while (evicted) - nf_ct_put(refs[--evicted]); + hlist_nulls_for_each_entry_safe(h, n, &evicted_list, hnnode) { + struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); + + hlist_nulls_add_fake(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); + hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode); + nf_ct_put(ct); + + cond_resched(); + } return ret; } @@ -97,35 +97,20 @@ static enum retry_state ecache_work_evict_list(struct ct_pcpu *pcpu) static void ecache_work(struct work_struct *work) { struct nf_conntrack_net *cnet = container_of(work, struct nf_conntrack_net, ecache.dwork.work); - struct netns_ct *ctnet = cnet->ecache.ct_net; - int cpu, delay = -1; - struct ct_pcpu *pcpu; + int ret, delay = -1; - local_bh_disable(); - - for_each_possible_cpu(cpu) { - enum retry_state ret; - - pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu); - - ret = ecache_work_evict_list(pcpu); - - switch (ret) { - case STATE_CONGESTED: - delay = ECACHE_RETRY_WAIT; - goto out; - case STATE_RESTART: - delay = 0; - break; - case STATE_DONE: - break; - } + ret = ecache_work_evict_list(cnet); + switch (ret) { + case STATE_CONGESTED: + delay = ECACHE_RETRY_JIFFIES; + break; + case STATE_RESTART: + delay = 0; + break; + case STATE_DONE: + break; } - out: - local_bh_enable(); - - ctnet->ecache_dwork_pending = delay > 0; if (delay >= 0) schedule_delayed_work(&cnet->ecache.dwork, delay); } @@ -199,7 +184,6 @@ int nf_conntrack_eventmask_report(unsigned int events, struct nf_conn *ct, */ if (e->portid == 0 && portid != 0) e->portid = portid; - e->state = NFCT_ECACHE_DESTROY_FAIL; } return ret; @@ -297,8 +281,10 @@ void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state) schedule_delayed_work(&cnet->ecache.dwork, HZ); net->ct.ecache_dwork_pending = true; } else if (state == NFCT_ECACHE_DESTROY_SENT) { - net->ct.ecache_dwork_pending = false; - mod_delayed_work(system_wq, &cnet->ecache.dwork, 0); + if (!hlist_nulls_empty(&cnet->ecache.dying_list)) + mod_delayed_work(system_wq, &cnet->ecache.dwork, 0); + else + net->ct.ecache_dwork_pending = false; } } @@ -311,8 +297,9 @@ void nf_conntrack_ecache_pernet_init(struct net *net) net->ct.sysctl_events = nf_ct_events; - cnet->ecache.ct_net = &net->ct; INIT_DELAYED_WORK(&cnet->ecache.dwork, ecache_work); + INIT_HLIST_NULLS_HEAD(&cnet->ecache.dying_list, DYING_NULLS_VAL); + spin_lock_init(&cnet->ecache.dying_lock); BUILD_BUG_ON(__IPCT_MAX >= 16); /* e->ctmask is u16 */ } From 0d3cc504ba9cdcff76346306c37eb1ea01e60a86 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 11 Apr 2022 13:01:17 +0200 Subject: [PATCH 02/17] netfilter: conntrack: include ecache dying list in dumps The new pernet dying list includes conntrack entries that await delivery of the 'destroy' event via ctnetlink. The old percpu dying list will be removed soon. 
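Readers of the new list follow the usual locked hlist_nulls walk; as a
sketch (using only the helpers introduced in this series, conntrack
reference handling and error paths omitted):

	struct nf_conntrack_net_ecache *ecache_net;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;

	ecache_net = nf_conn_pernet_ecache(net);
	spin_lock_bh(&ecache_net->dying_lock);
	hlist_nulls_for_each_entry(h, n, &ecache_net->dying_list, hnnode) {
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

		/* entries cannot be freed while dying_lock is held */
	}
	spin_unlock_bh(&ecache_net->dying_lock);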
Signed-off-by: Florian Westphal
Signed-off-by: Pablo Neira Ayuso
---
 include/net/netfilter/nf_conntrack_ecache.h |  2 +
 net/netfilter/nf_conntrack_ecache.c         | 10 +++++
 net/netfilter/nf_conntrack_netlink.c        | 43 +++++++++++++++++++++
 3 files changed, 55 insertions(+)

diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index a6135b5030dd..b57d73785e4d 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -164,6 +164,8 @@ void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state);
 void nf_conntrack_ecache_pernet_init(struct net *net);
 void nf_conntrack_ecache_pernet_fini(struct net *net);
 
+struct nf_conntrack_net_ecache *nf_conn_pernet_ecache(const struct net *net);
+
 static inline bool nf_conntrack_ecache_dwork_pending(const struct net *net)
 {
 	return net->ct.ecache_dwork_pending;
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 2752859479b2..334b2b4e5e8b 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -38,6 +38,16 @@ enum retry_state {
 	STATE_DONE,
 };
 
+struct nf_conntrack_net_ecache *nf_conn_pernet_ecache(const struct net *net)
+{
+	struct nf_conntrack_net *cnet = nf_ct_pernet(net);
+
+	return &cnet->ecache;
+}
+#if IS_MODULE(CONFIG_NF_CT_NETLINK)
+EXPORT_SYMBOL_GPL(nf_conn_pernet_ecache);
+#endif
+
 static enum retry_state ecache_work_evict_list(struct nf_conntrack_net *cnet)
 {
 	unsigned long stop = jiffies + ECACHE_MAX_JIFFIES;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 924d766e6c53..a4ec2aad2187 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -62,6 +62,7 @@ struct ctnetlink_list_dump_ctx {
 	struct nf_conn *last;
 	unsigned int cpu;
 	bool done;
+	bool retrans_done;
 };
 
 static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
@@ -1802,6 +1803,48 @@ out:
 static int
 ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
 {
+	struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
+	struct nf_conn *last = ctx->last;
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+	const struct net *net = sock_net(skb->sk);
+	struct nf_conntrack_net_ecache *ecache_net;
+	struct nf_conntrack_tuple_hash *h;
+	struct hlist_nulls_node *n;
+#endif
+
+	if (ctx->retrans_done)
+		return ctnetlink_dump_list(skb, cb, true);
+
+	ctx->last = NULL;
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+	ecache_net = nf_conn_pernet_ecache(net);
+	spin_lock_bh(&ecache_net->dying_lock);
+
+	hlist_nulls_for_each_entry(h, n, &ecache_net->dying_list, hnnode) {
+		struct nf_conn *ct;
+		int res;
+
+		ct = nf_ct_tuplehash_to_ctrack(h);
+		if (last && last != ct)
+			continue;
+
+		res = ctnetlink_dump_one_entry(skb, cb, ct, true);
+		if (res < 0) {
+			spin_unlock_bh(&ecache_net->dying_lock);
+			nf_ct_put(last);
+			return skb->len;
+		}
+
+		nf_ct_put(last);
+		last = NULL;
+	}
+
+	spin_unlock_bh(&ecache_net->dying_lock);
+#endif
+	nf_ct_put(last);
+	ctx->retrans_done = true;
+
+	return ctnetlink_dump_list(skb, cb, true);
 }

From 1397af5bfd7d32b0cf2adb70a78c9a9e8f11d912 Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Mon, 11 Apr 2022 13:01:18 +0200
Subject: [PATCH 03/17] netfilter: conntrack: remove the percpu dying list

It's no longer needed. Entries that need event redelivery are placed
on the new pernet dying list.
The advantage is that there is no need to take additional spinlock on conntrack removal unless event redelivery failed or the conntrack entry was never added to the table in the first place (confirmed bit not set). The IPS_CONFIRMED bit now needs to be set as soon as the entry has been unlinked from the unconfirmed list, else the destroy function may attempt to unlink it a second time. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netns/conntrack.h | 1 - net/netfilter/nf_conntrack_core.c | 35 +++++----------------------- net/netfilter/nf_conntrack_ecache.c | 1 - net/netfilter/nf_conntrack_netlink.c | 23 ++++++------------ 4 files changed, 13 insertions(+), 47 deletions(-) diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index 0294f3d473af..e985a3010b89 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -96,7 +96,6 @@ struct nf_ip_net { struct ct_pcpu { spinlock_t lock; struct hlist_nulls_head unconfirmed; - struct hlist_nulls_head dying; }; struct netns_ct { diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index ca1d1d105163..9010b6e5a072 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -525,21 +525,6 @@ clean_from_lists(struct nf_conn *ct) nf_ct_remove_expectations(ct); } -/* must be called with local_bh_disable */ -static void nf_ct_add_to_dying_list(struct nf_conn *ct) -{ - struct ct_pcpu *pcpu; - - /* add this conntrack to the (per cpu) dying list */ - ct->cpu = smp_processor_id(); - pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); - - spin_lock(&pcpu->lock); - hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, - &pcpu->dying); - spin_unlock(&pcpu->lock); -} - /* must be called with local_bh_disable */ static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct) { @@ -556,11 +541,11 @@ static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct) } /* must be called with local_bh_disable */ -static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct) +static void nf_ct_del_from_unconfirmed_list(struct nf_conn *ct) { struct ct_pcpu *pcpu; - /* We overload first tuple to link into unconfirmed or dying list.*/ + /* We overload first tuple to link into unconfirmed list.*/ pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); spin_lock(&pcpu->lock); @@ -648,7 +633,8 @@ void nf_ct_destroy(struct nf_conntrack *nfct) */ nf_ct_remove_expectations(ct); - nf_ct_del_from_dying_or_unconfirmed_list(ct); + if (unlikely(!nf_ct_is_confirmed(ct))) + nf_ct_del_from_unconfirmed_list(ct); local_bh_enable(); @@ -686,7 +672,6 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct) local_bh_disable(); __nf_ct_delete_from_lists(ct); - nf_ct_add_to_dying_list(ct); local_bh_enable(); } @@ -700,8 +685,6 @@ static void nf_ct_add_to_ecache_list(struct nf_conn *ct) hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, &cnet->ecache.dying_list); spin_unlock(&cnet->ecache.dying_lock); -#else - nf_ct_add_to_dying_list(ct); #endif } @@ -995,7 +978,6 @@ static void __nf_conntrack_insert_prepare(struct nf_conn *ct) struct nf_conn_tstamp *tstamp; refcount_inc(&ct->ct_general.use); - ct->status |= IPS_CONFIRMED; /* set conntrack timestamp, if enabled. 
*/ tstamp = nf_conn_tstamp_find(ct); @@ -1024,7 +1006,6 @@ static int __nf_ct_resolve_clash(struct sk_buff *skb, nf_conntrack_get(&ct->ct_general); nf_ct_acct_merge(ct, ctinfo, loser_ct); - nf_ct_add_to_dying_list(loser_ct); nf_ct_put(loser_ct); nf_ct_set(skb, ct, ctinfo); @@ -1157,7 +1138,6 @@ nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h, return ret; drop: - nf_ct_add_to_dying_list(loser_ct); NF_CT_STAT_INC(net, drop); NF_CT_STAT_INC(net, insert_failed); return NF_DROP; @@ -1224,10 +1204,10 @@ __nf_conntrack_confirm(struct sk_buff *skb) * user context, else we insert an already 'dead' hash, blocking * further use of that particular connection -JM. */ - nf_ct_del_from_dying_or_unconfirmed_list(ct); + nf_ct_del_from_unconfirmed_list(ct); + ct->status |= IPS_CONFIRMED; if (unlikely(nf_ct_is_dying(ct))) { - nf_ct_add_to_dying_list(ct); NF_CT_STAT_INC(net, insert_failed); goto dying; } @@ -1251,7 +1231,6 @@ __nf_conntrack_confirm(struct sk_buff *skb) goto out; if (chainlen++ > max_chainlen) { chaintoolong: - nf_ct_add_to_dying_list(ct); NF_CT_STAT_INC(net, chaintoolong); NF_CT_STAT_INC(net, insert_failed); ret = NF_DROP; @@ -2800,7 +2779,6 @@ void nf_conntrack_init_end(void) * We need to use special "null" values, not used in hash table */ #define UNCONFIRMED_NULLS_VAL ((1<<30)+0) -#define DYING_NULLS_VAL ((1<<30)+1) int nf_conntrack_init_net(struct net *net) { @@ -2821,7 +2799,6 @@ int nf_conntrack_init_net(struct net *net) spin_lock_init(&pcpu->lock); INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL); - INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL); } net->ct.stat = alloc_percpu(struct ip_conntrack_stat); diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index 334b2b4e5e8b..7472c544642f 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c @@ -94,7 +94,6 @@ next: hlist_nulls_for_each_entry_safe(h, n, &evicted_list, hnnode) { struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); - hlist_nulls_add_fake(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode); nf_ct_put(ct); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index a4ec2aad2187..2e9c8183e4a2 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -62,7 +62,6 @@ struct ctnetlink_list_dump_ctx { struct nf_conn *last; unsigned int cpu; bool done; - bool retrans_done; }; static int ctnetlink_dump_tuples_proto(struct sk_buff *skb, @@ -1751,13 +1750,12 @@ static int ctnetlink_dump_one_entry(struct sk_buff *skb, } static int -ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying) +ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb) { struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx; struct nf_conn *ct, *last; struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; - struct hlist_nulls_head *list; struct net *net = sock_net(skb->sk); int res, cpu; @@ -1774,12 +1772,11 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); spin_lock_bh(&pcpu->lock); - list = dying ? 
&pcpu->dying : &pcpu->unconfirmed;
 restart:
-	hlist_nulls_for_each_entry(h, n, list, hnnode) {
+	hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
 
-		res = ctnetlink_dump_one_entry(skb, cb, ct, dying);
+		res = ctnetlink_dump_one_entry(skb, cb, ct, false);
 		if (res < 0) {
 			ctx->cpu = cpu;
 			spin_unlock_bh(&pcpu->lock);
 			goto out;
@@ -1812,8 +1809,8 @@ ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
 	struct hlist_nulls_node *n;
 #endif
 
-	if (ctx->retrans_done)
-		return ctnetlink_dump_list(skb, cb, true);
+	if (ctx->done)
+		return 0;
 
 	ctx->last = NULL;
 
@@ -1842,10 +1839,10 @@ ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
 
 	spin_unlock_bh(&ecache_net->dying_lock);
 #endif
+	ctx->done = true;
 	nf_ct_put(last);
-	ctx->retrans_done = true;
 
-	return ctnetlink_dump_list(skb, cb, true);
+	return skb->len;
 }
 
 static int ctnetlink_get_ct_dying(struct sk_buff *skb,
@@ -1863,12 +1860,6 @@ static int ctnetlink_get_ct_dying(struct sk_buff *skb,
 	return -EOPNOTSUPP;
 }
 
-static int
-ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
-{
-	return ctnetlink_dump_list(skb, cb, false);
-}
-
 static int ctnetlink_get_ct_unconfirmed(struct sk_buff *skb,
 					const struct nfnl_info *info,
 					const struct nlattr * const cda[])

From 78222bacfca97cb18505df1ba5f3591864498a7e Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Mon, 11 Apr 2022 13:01:19 +0200
Subject: [PATCH 04/17] netfilter: cttimeout: decouple unlink and free on
 netns destruction

Make it so netns pre_exit unlinks the objects from the pernet list,
so they cannot be found anymore.

netns core issues a synchronize_rcu() before calling the exit hooks,
so by the time the exit hooks run, unconfirmed nf_conn entries have
been free'd or they have been committed to the hashtable.

The exit hook still tags unconfirmed entries as dying; this can now
be removed in a followup change.
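The resulting shutdown sequence is the classic rcu unlink/free split;
roughly (a sketch of the hooks added below, with the grace period
provided by netns core rather than an explicit call here):

	/* pre_exit: unlink only, object stays valid for rcu readers */
	list_del_rcu(&cur->head);
	list_add(&cur->free_head, &pernet->nfct_timeout_freelist);

	/* netns core runs synchronize_rcu() between the two hooks */

	/* exit: no new lookups possible, drop the remaining reference */
	if (refcount_dec_and_test(&cur->refcnt))
		kfree_rcu(cur, rcu_head);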
Signed-off-by: Florian Westphal
Signed-off-by: Pablo Neira Ayuso
---
 include/net/netfilter/nf_conntrack_timeout.h |  8 ------
 net/netfilter/nfnetlink_cttimeout.c          | 30 ++++++++++++++++++--
 2 files changed, 28 insertions(+), 10 deletions(-)

diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 3ea94f6f3844..fea258983d23 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -17,14 +17,6 @@ struct nf_ct_timeout {
 	char data[];
 };
 
-struct ctnl_timeout {
-	struct list_head head;
-	struct rcu_head rcu_head;
-	refcount_t refcnt;
-	char name[CTNL_TIMEOUT_NAME_MAX];
-	struct nf_ct_timeout timeout;
-};
-
 struct nf_conn_timeout {
 	struct nf_ct_timeout __rcu *timeout;
 };
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index eea486f32971..83fa15c4193c 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -33,8 +33,19 @@
 
 static unsigned int nfct_timeout_id __read_mostly;
 
+struct ctnl_timeout {
+	struct list_head head;
+	struct rcu_head rcu_head;
+	refcount_t refcnt;
+	char name[CTNL_TIMEOUT_NAME_MAX];
+	struct nf_ct_timeout timeout;
+
+	struct list_head free_head;
+};
+
 struct nfct_timeout_pernet {
 	struct list_head nfct_timeout_list;
+	struct list_head nfct_timeout_freelist;
 };
 
 MODULE_LICENSE("GPL");
@@ -574,10 +585,24 @@ static int __net_init cttimeout_net_init(struct net *net)
 	struct nfct_timeout_pernet *pernet = nfct_timeout_pernet(net);
 
 	INIT_LIST_HEAD(&pernet->nfct_timeout_list);
+	INIT_LIST_HEAD(&pernet->nfct_timeout_freelist);
 
 	return 0;
 }
 
+static void __net_exit cttimeout_net_pre_exit(struct net *net)
+{
+	struct nfct_timeout_pernet *pernet = nfct_timeout_pernet(net);
+	struct ctnl_timeout *cur, *tmp;
+
+	list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_list, head) {
+		list_del_rcu(&cur->head);
+		list_add(&cur->free_head, &pernet->nfct_timeout_freelist);
+	}
+
+	/* core calls synchronize_rcu() after this */
+}
+
 static void __net_exit cttimeout_net_exit(struct net *net)
 {
 	struct nfct_timeout_pernet *pernet = nfct_timeout_pernet(net);
@@ -586,8 +611,8 @@ static void __net_exit cttimeout_net_exit(struct net *net)
 	nf_ct_unconfirmed_destroy(net);
 	nf_ct_untimeout(net, NULL);
 
-	list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_list, head) {
-		list_del_rcu(&cur->head);
+	list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_freelist, head) {
+		list_del(&cur->free_head);
 
 		if (refcount_dec_and_test(&cur->refcnt))
 			kfree_rcu(cur, rcu_head);
@@ -596,6 +621,7 @@
 
 static struct pernet_operations cttimeout_ops = {
 	.init		= cttimeout_net_init,
+	.pre_exit	= cttimeout_net_pre_exit,
 	.exit		= cttimeout_net_exit,
 	.id		= &nfct_timeout_id,
 	.size		= sizeof(struct nfct_timeout_pernet),

From 17438b42ce14cb60ceda9ae62ad5dd022d55a216 Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Mon, 11 Apr 2022 13:01:20 +0200
Subject: [PATCH 05/17] netfilter: remove nf_ct_unconfirmed_destroy helper

This helper tags connections not yet in the conntrack table as dying.
These nf_conn entries will be dropped instead when the core attempts
to insert them from the input or postrouting 'confirm' hook.

After the previous change, the entries get unlinked from the list
earlier, so that by the time the actual exit hook runs, new
connections no longer have a timeout policy assigned. It's enough to
walk the hashtable instead.
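For cttimeout this boils down to the existing iterator; a sketch of
the hashtable-only cleanup, using the callback signature that
nf_ct_iterate_cleanup_net() expects at this point in the series:

	static int untimeout(struct nf_conn *ct, void *timeout)
	{
		struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct);

		if (timeout_ext)
			RCU_INIT_POINTER(timeout_ext->timeout, NULL);

		return 0;
	}

	/* walks only entries that made it into the hash table */
	nf_ct_iterate_cleanup_net(net, untimeout, NULL, 0, 0);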
Signed-off-by: Florian Westphal
Signed-off-by: Pablo Neira Ayuso
---
 include/net/netfilter/nf_conntrack.h |  3 ---
 net/netfilter/nf_conntrack_core.c    | 14 --------------
 net/netfilter/nfnetlink_cttimeout.c  |  4 +++-
 3 files changed, 3 insertions(+), 18 deletions(-)

diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 28672a944499..f60212244b13 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -237,9 +237,6 @@ static inline bool nf_ct_kill(struct nf_conn *ct)
 	return nf_ct_delete(ct, 0, 0);
 }
 
-/* Set all unconfirmed conntrack as dying */
-void nf_ct_unconfirmed_destroy(struct net *);
-
 /* Iterate over all conntracks: if iter returns true, it's deleted. */
 void nf_ct_iterate_cleanup_net(struct net *net,
 			       int (*iter)(struct nf_conn *i, void *data),
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 9010b6e5a072..b3cc318ceb45 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2431,20 +2431,6 @@ __nf_ct_unconfirmed_destroy(struct net *net)
 	}
 }
 
-void nf_ct_unconfirmed_destroy(struct net *net)
-{
-	struct nf_conntrack_net *cnet = nf_ct_pernet(net);
-
-	might_sleep();
-
-	if (atomic_read(&cnet->count) > 0) {
-		__nf_ct_unconfirmed_destroy(net);
-		nf_queue_nf_hook_drop(net);
-		synchronize_net();
-	}
-}
-EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy);
-
 void nf_ct_iterate_cleanup_net(struct net *net,
 			       int (*iter)(struct nf_conn *i, void *data),
 			       void *data, u32 portid, int report)
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 83fa15c4193c..f366b8187915 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -608,7 +608,9 @@ static void __net_exit cttimeout_net_exit(struct net *net)
 	struct nfct_timeout_pernet *pernet = nfct_timeout_pernet(net);
 	struct ctnl_timeout *cur, *tmp;
 
-	nf_ct_unconfirmed_destroy(net);
+	if (list_empty(&pernet->nfct_timeout_freelist))
+		return;
+
 	nf_ct_untimeout(net, NULL);
 
 	list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_freelist, head) {

From c56716c69ce1ac320432fb1ea5654196ba24d2f8 Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Mon, 11 Apr 2022 13:01:21 +0200
Subject: [PATCH 06/17] netfilter: extensions: introduce extension genid count

Multiple netfilter extensions store pointers to external data in
their extension area struct. Examples:

1. Timeout policies
2. Connection tracking helpers.

No references are taken for these.

When a helper or timeout policy is removed, the conntrack table gets
traversed and affected extensions are cleared.

Conntrack entries not yet in the hashtable are referenced via a
special list, the unconfirmed list.

On removal of a policy or connection tracking helper, the unconfirmed
list gets traversed and all entries are marked as dying; this prevents
them from getting committed to the table at insertion time: the core
checks for the dying bit and, if set, the conntrack entry gets
destroyed at confirm time.

The disadvantage is that each new conntrack has to be added to the
percpu unconfirmed list, and each insertion needs to remove it from
this list. The list is only ever needed when a policy or helper is
removed -- a rare occurrence.

Add a generation ID count: instead of adding to the list and then
traversing that list on policy/helper removal, increment a counter
that is stored in the extension area.

For unconfirmed conntracks, the extension has the genid valid at ct
allocation time.
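At confirmation time the check is then a single comparison against
the global counter; the helper added below boils down to:

	static bool nf_ct_ext_valid_pre(const struct nf_ct_ext *ext)
	{
		/* stale if a helper/policy was removed since allocation */
		return !ext || ext->gen_id == atomic_read(&nf_conntrack_ext_genid);
	}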
Removal of a helper/policy etc. increments the counter. At
confirmation time, validate that ext->genid == global_id. If the
stored number is not the same, do not allow the conntrack insertion,
just as if a confirmed-list traversal had flagged the entry as dying.

After insertion, the genid is no longer relevant (conntrack entries
are now reachable via the conntrack table iterators) and is set to 0.

This allows removal of the percpu unconfirmed list.

Signed-off-by: Florian Westphal
Signed-off-by: Pablo Neira Ayuso
---
 include/net/netfilter/nf_conntrack_extend.h | 31 ++++++------
 include/net/netfilter/nf_conntrack_labels.h | 10 +++-
 net/netfilter/nf_conntrack_core.c           | 55 +++++++++++++++++++++
 net/netfilter/nf_conntrack_extend.c         | 32 +++++++++++-
 4 files changed, 111 insertions(+), 17 deletions(-)

diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 96635ad2acc7..0b247248b032 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -34,21 +34,11 @@ enum nf_ct_ext_id {
 	NF_CT_EXT_NUM,
 };
 
-#define NF_CT_EXT_HELPER_TYPE struct nf_conn_help
-#define NF_CT_EXT_NAT_TYPE struct nf_conn_nat
-#define NF_CT_EXT_SEQADJ_TYPE struct nf_conn_seqadj
-#define NF_CT_EXT_ACCT_TYPE struct nf_conn_acct
-#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
-#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
-#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout
-#define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels
-#define NF_CT_EXT_SYNPROXY_TYPE struct nf_conn_synproxy
-#define NF_CT_EXT_ACT_CT_TYPE struct nf_conn_act_ct_ext
-
 /* Extensions: optional stuff which isn't permanently in struct. */
 struct nf_ct_ext {
 	u8 offset[NF_CT_EXT_NUM];
 	u8 len;
+	unsigned int gen_id;
 	char data[] __aligned(8);
 };
 
@@ -62,17 +52,28 @@ static inline bool nf_ct_ext_exist(const struct nf_conn *ct, u8 id)
 	return (ct->ext && __nf_ct_ext_exist(ct->ext, id));
 }
 
-static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id)
+void *__nf_ct_ext_find(const struct nf_ct_ext *ext, u8 id);
+
+static inline void *nf_ct_ext_find(const struct nf_conn *ct, u8 id)
 {
-	if (!nf_ct_ext_exist(ct, id))
+	struct nf_ct_ext *ext = ct->ext;
+
+	if (!ext || !__nf_ct_ext_exist(ext, id))
 		return NULL;
 
+	if (unlikely(ext->gen_id))
+		return __nf_ct_ext_find(ext, id);
+
 	return (void *)ct->ext + ct->ext->offset[id];
 }
-#define nf_ct_ext_find(ext, id)	\
-	((id##_TYPE *)__nf_ct_ext_find((ext), (id)))
 
 /* Add this type, returns pointer to data or NULL. */
 void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);
+
+/* ext genid. if ext->id != ext_genid, extensions cannot be used
+ * anymore unless conntrack has CONFIRMED bit set.
+ */
+extern atomic_t nf_conntrack_ext_genid;
+void nf_ct_ext_bump_genid(void);
+
 #endif /* _NF_CONNTRACK_EXTEND_H */
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index 3c23298e68ca..66bab6c60d12 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -17,10 +17,18 @@ struct nf_conn_labels {
 	unsigned long bits[NF_CT_LABELS_MAX_SIZE / sizeof(long)];
 };
 
+/* Can't use nf_ct_ext_find(), flow dissector cannot use symbols
+ * exported by nf_conntrack module.
+ */ static inline struct nf_conn_labels *nf_ct_labels_find(const struct nf_conn *ct) { #ifdef CONFIG_NF_CONNTRACK_LABELS - return nf_ct_ext_find(ct, NF_CT_EXT_LABELS); + struct nf_ct_ext *ext = ct->ext; + + if (!ext || !__nf_ct_ext_exist(ext, NF_CT_EXT_LABELS)) + return NULL; + + return (void *)ct->ext + ct->ext->offset[NF_CT_EXT_LABELS]; #else return NULL; #endif diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index b3cc318ceb45..76310940cbd7 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -876,6 +876,33 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct, &nf_conntrack_hash[reply_hash]); } +static bool nf_ct_ext_valid_pre(const struct nf_ct_ext *ext) +{ + /* if ext->gen_id is not equal to nf_conntrack_ext_genid, some extensions + * may contain stale pointers to e.g. helper that has been removed. + * + * The helper can't clear this because the nf_conn object isn't in + * any hash and synchronize_rcu() isn't enough because associated skb + * might sit in a queue. + */ + return !ext || ext->gen_id == atomic_read(&nf_conntrack_ext_genid); +} + +static bool nf_ct_ext_valid_post(struct nf_ct_ext *ext) +{ + if (!ext) + return true; + + if (ext->gen_id != atomic_read(&nf_conntrack_ext_genid)) + return false; + + /* inserted into conntrack table, nf_ct_iterate_cleanup() + * will find it. Disable nf_ct_ext_find() id check. + */ + WRITE_ONCE(ext->gen_id, 0); + return true; +} + int nf_conntrack_hash_check_insert(struct nf_conn *ct) { @@ -891,6 +918,11 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct) zone = nf_ct_zone(ct); + if (!nf_ct_ext_valid_pre(ct->ext)) { + NF_CT_STAT_INC(net, insert_failed); + return -ETIMEDOUT; + } + local_bh_disable(); do { sequence = read_seqcount_begin(&nf_conntrack_generation); @@ -931,6 +963,13 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct) nf_conntrack_double_unlock(hash, reply_hash); NF_CT_STAT_INC(net, insert); local_bh_enable(); + + if (!nf_ct_ext_valid_post(ct->ext)) { + nf_ct_kill(ct); + NF_CT_STAT_INC(net, drop); + return -ETIMEDOUT; + } + return 0; chaintoolong: NF_CT_STAT_INC(net, chaintoolong); @@ -1198,6 +1237,11 @@ __nf_conntrack_confirm(struct sk_buff *skb) return NF_DROP; } + if (!nf_ct_ext_valid_pre(ct->ext)) { + NF_CT_STAT_INC(net, insert_failed); + goto dying; + } + pr_debug("Confirming conntrack %p\n", ct); /* We have to check the DYING flag after unlink to prevent * a race against nf_ct_get_next_corpse() possibly called from @@ -1254,6 +1298,16 @@ chaintoolong: nf_conntrack_double_unlock(hash, reply_hash); local_bh_enable(); + /* ext area is still valid (rcu read lock is held, + * but will go out of scope soon, we need to remove + * this conntrack again. 
+	 */
+	if (!nf_ct_ext_valid_post(ct->ext)) {
+		nf_ct_kill(ct);
+		NF_CT_STAT_INC(net, drop);
+		return NF_DROP;
+	}
+
 	help = nfct_help(ct);
 	if (help && help->helper)
 		nf_conntrack_event_cache(IPCT_HELPER, ct);
@@ -2491,6 +2545,7 @@ nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
 	 */
 	synchronize_net();
 
+	nf_ct_ext_bump_genid();
 	nf_ct_iterate_cleanup(iter, data, 0, 0);
 }
 EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 1296fda54ac6..0b513f7bf9f3 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -27,6 +27,8 @@
 
 #define NF_CT_EXT_PREALLOC 128u /* conntrack events are on by default */
 
+atomic_t nf_conntrack_ext_genid __read_mostly = ATOMIC_INIT(1);
+
 static const u8 nf_ct_ext_type_len[NF_CT_EXT_NUM] = {
 	[NF_CT_EXT_HELPER] = sizeof(struct nf_conn_help),
 #if IS_ENABLED(CONFIG_NF_NAT)
@@ -116,8 +118,10 @@ void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
 	if (!new)
 		return NULL;
 
-	if (!ct->ext)
+	if (!ct->ext) {
 		memset(new->offset, 0, sizeof(new->offset));
+		new->gen_id = atomic_read(&nf_conntrack_ext_genid);
+	}
 
 	new->offset[id] = newoff;
 	new->len = newlen;
@@ -127,3 +131,29 @@ void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
 	return (void *)new + newoff;
 }
 EXPORT_SYMBOL(nf_ct_ext_add);
+
+/* Use nf_ct_ext_find wrapper. This is only useful for unconfirmed entries. */
+void *__nf_ct_ext_find(const struct nf_ct_ext *ext, u8 id)
+{
+	unsigned int gen_id = atomic_read(&nf_conntrack_ext_genid);
+	unsigned int this_id = READ_ONCE(ext->gen_id);
+
+	if (!__nf_ct_ext_exist(ext, id))
+		return NULL;
+
+	if (this_id == 0 || ext->gen_id == gen_id)
+		return (void *)ext + ext->offset[id];
+
+	return NULL;
+}
+EXPORT_SYMBOL(__nf_ct_ext_find);
+
+void nf_ct_ext_bump_genid(void)
+{
+	unsigned int value = atomic_inc_return(&nf_conntrack_ext_genid);
+
+	if (value == UINT_MAX)
+		atomic_set(&nf_conntrack_ext_genid, 1);
+
+	msleep(HZ);
+}

From 42df4fb9b1bed6a6d2c1f79b974a9ed14de27ea1 Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Mon, 11 Apr 2022 13:01:22 +0200
Subject: [PATCH 07/17] netfilter: cttimeout: decouple unlink and free on
 netns destruction

Increment the extid on module removal; this makes sure that even in
extreme cases, any old unconfirmed entry that happened to be kept
e.g. on the nfnetlink_queue list will not trip over a stale timeout
reference.
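An illustrative sequence this guards against (sketch, not taken from
the code):

	/*
	 * cpu0: allocates unconfirmed ct, ext->gen_id = N,
	 *       timeout extension points at a ctnl_timeout object
	 * cpu1: module unload, timeout object freed,
	 *       nf_ct_ext_bump_genid() -> genid becomes N + 1
	 * cpu0: skb reinjected, confirm path runs,
	 *       nf_ct_ext_valid_pre() sees N != N + 1 -> ct is dropped
	 *       and the stale timeout pointer is never dereferenced
	 */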
Signed-off-by: Florian Westphal
Signed-off-by: Pablo Neira Ayuso
---
 net/netfilter/nfnetlink_cttimeout.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index f366b8187915..9bc4ebe65faa 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -656,12 +656,24 @@ err_out:
 	return ret;
 }
 
+static int untimeout(struct nf_conn *ct, void *timeout)
+{
+	struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct);
+
+	if (timeout_ext)
+		RCU_INIT_POINTER(timeout_ext->timeout, NULL);
+
+	return 0;
+}
+
 static void __exit cttimeout_exit(void)
 {
 	nfnetlink_subsys_unregister(&cttimeout_subsys);
 
 	unregister_pernet_subsys(&cttimeout_ops);
 	RCU_INIT_POINTER(nf_ct_timeout_hook, NULL);
+
+	nf_ct_iterate_destroy(untimeout, NULL);
 	synchronize_rcu();
 }

From ace53fdc262fa6751acd3d61f3236f84ae3340f1 Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Mon, 11 Apr 2022 13:01:23 +0200
Subject: [PATCH 08/17] netfilter: conntrack: remove
 __nf_ct_unconfirmed_destroy

It's not needed anymore:

A. If the entry is totally new, then the rcu-protected resource must
   already have been removed from global visibility before the call
   to nf_ct_iterate_destroy.

B. If the entry was allocated before, but is not yet in the hash
   table (unconfirmed case), genid gets incremented and the
   synchronize_rcu() call makes sure access has completed.

C. The next attempt to peek at the extension area will fail for
   unconfirmed conntracks, because ext->genid != genid.

D. Conntracks in the hash are iterated as before.

Signed-off-by: Florian Westphal
Signed-off-by: Pablo Neira Ayuso
---
 net/netfilter/nf_conntrack_core.c   | 46 ++++++++----------------
 net/netfilter/nf_conntrack_helper.c |  5 ----
 net/netfilter/nfnetlink_cttimeout.c |  1 -
 3 files changed, 13 insertions(+), 39 deletions(-)

diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 76310940cbd7..7b4b3f5db959 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2457,34 +2457,6 @@ static int iter_net_only(struct nf_conn *i, void *data)
 	return d->iter(i, d->data);
 }
 
-static void
-__nf_ct_unconfirmed_destroy(struct net *net)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct nf_conntrack_tuple_hash *h;
-		struct hlist_nulls_node *n;
-		struct ct_pcpu *pcpu;
-
-		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
-
-		spin_lock_bh(&pcpu->lock);
-		hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
-			struct nf_conn *ct;
-
-			ct = nf_ct_tuplehash_to_ctrack(h);
-
-			/* we cannot call iter() on unconfirmed list, the
-			 * owning cpu can reallocate ct->ext at any time.
-			 */
-			set_bit(IPS_DYING_BIT, &ct->status);
-		}
-		spin_unlock_bh(&pcpu->lock);
-		cond_resched();
-	}
-}
-
 void nf_ct_iterate_cleanup_net(struct net *net,
 			       int (*iter)(struct nf_conn *i, void *data),
 			       void *data, u32 portid, int report)
@@ -2527,26 +2499,34 @@ nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
 		if (atomic_read(&cnet->count) == 0)
 			continue;
 
-		__nf_ct_unconfirmed_destroy(net);
 		nf_queue_nf_hook_drop(net);
 	}
 	up_read(&net_rwsem);
 
 	/* Need to wait for netns cleanup worker to finish, if its
 	 * running -- it might have deleted a net namespace from
-	 * the global list, so our __nf_ct_unconfirmed_destroy() might
-	 * not have affected all namespaces.
+	 * the global list, so hook drop above might not have
+	 * affected all namespaces.
*/ net_ns_barrier(); - /* a conntrack could have been unlinked from unconfirmed list - * before we grabbed pcpu lock in __nf_ct_unconfirmed_destroy(). + /* a skb w. unconfirmed conntrack could have been reinjected just + * before we called nf_queue_nf_hook_drop(). + * * This makes sure its inserted into conntrack table. */ synchronize_net(); nf_ct_ext_bump_genid(); nf_ct_iterate_cleanup(iter, data, 0, 0); + + /* Another cpu might be in a rcu read section with + * rcu protected pointer cleared in iter callback + * or hidden via nf_ct_ext_bump_genid() above. + * + * Wait until those are done. + */ + synchronize_rcu(); } EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy); diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 8dec42ec603e..c12a87ebc3ee 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -468,11 +468,6 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) nf_ct_expect_iterate_destroy(expect_iter_me, NULL); nf_ct_iterate_destroy(unhelp, me); - - /* Maybe someone has gotten the helper already when unhelp above. - * So need to wait it. - */ - synchronize_rcu(); } EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index 9bc4ebe65faa..f069c24c6146 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -674,7 +674,6 @@ static void __exit cttimeout_exit(void) RCU_INIT_POINTER(nf_ct_timeout_hook, NULL); nf_ct_iterate_destroy(untimeout, NULL); - synchronize_rcu(); } module_init(cttimeout_init); From 8a75a2c1741073f0c2d7bee146648e717527e048 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 11 Apr 2022 13:01:24 +0200 Subject: [PATCH 09/17] netfilter: conntrack: remove unconfirmed list It has no function anymore and can be removed. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack.h | 1 - include/net/netns/conntrack.h | 6 --- net/netfilter/nf_conntrack_core.c | 57 +--------------------------- net/netfilter/nf_conntrack_netlink.c | 44 +-------------------- 4 files changed, 3 insertions(+), 105 deletions(-) diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index f60212244b13..3ce9a5b42fe5 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -101,7 +101,6 @@ struct nf_conn { /* Have we seen traffic both ways yet? 
(bitset) */ unsigned long status; - u16 cpu; possible_net_t ct_net; #if IS_ENABLED(CONFIG_NF_NAT) diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index e985a3010b89..a71cfd4e2f21 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -93,11 +93,6 @@ struct nf_ip_net { #endif }; -struct ct_pcpu { - spinlock_t lock; - struct hlist_nulls_head unconfirmed; -}; - struct netns_ct { #ifdef CONFIG_NF_CONNTRACK_EVENTS bool ecache_dwork_pending; @@ -109,7 +104,6 @@ struct netns_ct { u8 sysctl_tstamp; u8 sysctl_checksum; - struct ct_pcpu __percpu *pcpu_lists; struct ip_conntrack_stat __percpu *stat; struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb; struct nf_ip_net nf_ct_proto; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 7b4b3f5db959..de1547a2830e 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -525,35 +525,6 @@ clean_from_lists(struct nf_conn *ct) nf_ct_remove_expectations(ct); } -/* must be called with local_bh_disable */ -static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct) -{ - struct ct_pcpu *pcpu; - - /* add this conntrack to the (per cpu) unconfirmed list */ - ct->cpu = smp_processor_id(); - pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); - - spin_lock(&pcpu->lock); - hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, - &pcpu->unconfirmed); - spin_unlock(&pcpu->lock); -} - -/* must be called with local_bh_disable */ -static void nf_ct_del_from_unconfirmed_list(struct nf_conn *ct) -{ - struct ct_pcpu *pcpu; - - /* We overload first tuple to link into unconfirmed list.*/ - pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); - - spin_lock(&pcpu->lock); - BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode)); - hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); - spin_unlock(&pcpu->lock); -} - #define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK) /* Released via nf_ct_destroy() */ @@ -625,7 +596,6 @@ void nf_ct_destroy(struct nf_conntrack *nfct) if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE)) destroy_gre_conntrack(ct); - local_bh_disable(); /* Expectations will have been removed in clean_from_lists, * except TFTP can create an expectation on the first packet, * before connection is in the list, so we need to clean here, @@ -633,11 +603,6 @@ void nf_ct_destroy(struct nf_conntrack *nfct) */ nf_ct_remove_expectations(ct); - if (unlikely(!nf_ct_is_confirmed(ct))) - nf_ct_del_from_unconfirmed_list(ct); - - local_bh_enable(); - if (ct->master) nf_ct_put(ct->master); @@ -1248,7 +1213,6 @@ __nf_conntrack_confirm(struct sk_buff *skb) * user context, else we insert an already 'dead' hash, blocking * further use of that particular connection -JM. */ - nf_ct_del_from_unconfirmed_list(ct); ct->status |= IPS_CONFIRMED; if (unlikely(nf_ct_is_dying(ct))) { @@ -1803,9 +1767,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, if (!exp) __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC); - /* Now it is inserted into the unconfirmed list, set refcount to 1. */ + /* Now it is going to be associated with an sk_buff, set refcount to 1. 
*/ refcount_set(&ct->ct_general.use, 1); - nf_ct_add_to_unconfirmed_list(ct); local_bh_enable(); @@ -2594,7 +2557,6 @@ i_see_dead_people: nf_conntrack_ecache_pernet_fini(net); nf_conntrack_expect_pernet_fini(net); free_percpu(net->ct.stat); - free_percpu(net->ct.pcpu_lists); } } @@ -2805,26 +2767,14 @@ int nf_conntrack_init_net(struct net *net) { struct nf_conntrack_net *cnet = nf_ct_pernet(net); int ret = -ENOMEM; - int cpu; BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER); BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS); atomic_set(&cnet->count, 0); - net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu); - if (!net->ct.pcpu_lists) - goto err_stat; - - for_each_possible_cpu(cpu) { - struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); - - spin_lock_init(&pcpu->lock); - INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL); - } - net->ct.stat = alloc_percpu(struct ip_conntrack_stat); if (!net->ct.stat) - goto err_pcpu_lists; + return ret; ret = nf_conntrack_expect_pernet_init(net); if (ret < 0) @@ -2840,8 +2790,5 @@ int nf_conntrack_init_net(struct net *net) err_expect: free_percpu(net->ct.stat); -err_pcpu_lists: - free_percpu(net->ct.pcpu_lists); -err_stat: return ret; } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 2e9c8183e4a2..eafe640b3387 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1752,49 +1752,7 @@ static int ctnetlink_dump_one_entry(struct sk_buff *skb, static int ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb) { - struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx; - struct nf_conn *ct, *last; - struct nf_conntrack_tuple_hash *h; - struct hlist_nulls_node *n; - struct net *net = sock_net(skb->sk); - int res, cpu; - - if (ctx->done) - return 0; - - last = ctx->last; - - for (cpu = ctx->cpu; cpu < nr_cpu_ids; cpu++) { - struct ct_pcpu *pcpu; - - if (!cpu_possible(cpu)) - continue; - - pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); - spin_lock_bh(&pcpu->lock); -restart: - hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) { - ct = nf_ct_tuplehash_to_ctrack(h); - - res = ctnetlink_dump_one_entry(skb, cb, ct, false); - if (res < 0) { - ctx->cpu = cpu; - spin_unlock_bh(&pcpu->lock); - goto out; - } - } - if (ctx->last) { - ctx->last = NULL; - goto restart; - } - spin_unlock_bh(&pcpu->lock); - } - ctx->done = true; -out: - if (last) - nf_ct_put(last); - - return skb->len; + return 0; } static int From 0bcfbafbcd345f285db0c3788e6359ceac6a008c Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 11 Apr 2022 13:01:25 +0200 Subject: [PATCH 10/17] netfilter: conntrack: avoid unconditional local_bh_disable Now that the conntrack entry isn't placed on the pcpu list anymore the bh only needs to be disabled in the 'expectation present' case. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_core.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index de1547a2830e..22492f7eb819 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1736,10 +1736,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, ecache ? 
ecache->expmask : 0, GFP_ATOMIC); - local_bh_disable(); cnet = nf_ct_pernet(net); if (cnet->expect_count) { - spin_lock(&nf_conntrack_expect_lock); + spin_lock_bh(&nf_conntrack_expect_lock); exp = nf_ct_find_expectation(net, zone, tuple); if (exp) { pr_debug("expectation arrives ct=%p exp=%p\n", @@ -1762,7 +1761,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, #endif NF_CT_STAT_INC(net, expect_new); } - spin_unlock(&nf_conntrack_expect_lock); + spin_unlock_bh(&nf_conntrack_expect_lock); } if (!exp) __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC); @@ -1770,8 +1769,6 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, /* Now it is going to be associated with an sk_buff, set refcount to 1. */ refcount_set(&ct->ct_general.use, 1); - local_bh_enable(); - if (exp) { if (exp->expectfn) exp->expectfn(ct, exp); From 8169ff584003c871a226719e998bb034231954d6 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 8 Apr 2022 13:10:19 +0200 Subject: [PATCH 11/17] netfilter: conntrack: add nf_ct_iter_data object for nf_ct_iterate_cleanup*() This patch adds a structure to collect all the context data that is passed to the cleanup iterator. struct nf_ct_iter_data { struct net *net; void *data; u32 portid; int report; }; There is a netns field that allows to clean up conntrack entries specifically owned by the specified netns. Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack.h | 12 ++++-- net/netfilter/nf_conntrack_core.c | 56 +++++++++++----------------- net/netfilter/nf_conntrack_netlink.c | 10 ++++- net/netfilter/nf_conntrack_proto.c | 10 +++-- net/netfilter/nf_conntrack_timeout.c | 7 +++- net/netfilter/nf_nat_masquerade.c | 5 ++- 6 files changed, 56 insertions(+), 44 deletions(-) diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 3ce9a5b42fe5..a32be8aa7ed2 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -236,10 +236,16 @@ static inline bool nf_ct_kill(struct nf_conn *ct) return nf_ct_delete(ct, 0, 0); } +struct nf_ct_iter_data { + struct net *net; + void *data; + u32 portid; + int report; +}; + /* Iterate over all conntracks: if iter returns true, it's deleted. */ -void nf_ct_iterate_cleanup_net(struct net *net, - int (*iter)(struct nf_conn *i, void *data), - void *data, u32 portid, int report); +void nf_ct_iterate_cleanup_net(int (*iter)(struct nf_conn *i, void *data), + const struct nf_ct_iter_data *iter_data); /* also set unconfirmed conntracks as dying. Only use in module exit path. */ void nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 22492f7eb819..efbfd67d5c3d 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -2335,7 +2335,7 @@ static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, /* Bring out ya dead! */ static struct nf_conn * get_next_corpse(int (*iter)(struct nf_conn *i, void *data), - void *data, unsigned int *bucket) + const struct nf_ct_iter_data *iter_data, unsigned int *bucket) { struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; @@ -2366,7 +2366,12 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data), * tuple while iterating. 
*/ ct = nf_ct_tuplehash_to_ctrack(h); - if (iter(ct, data)) + + if (iter_data->net && + !net_eq(iter_data->net, nf_ct_net(ct))) + continue; + + if (iter(ct, iter_data->data)) goto found; } spin_unlock(lockp); @@ -2383,7 +2388,7 @@ found: } static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), - void *data, u32 portid, int report) + const struct nf_ct_iter_data *iter_data) { unsigned int bucket = 0; struct nf_conn *ct; @@ -2391,49 +2396,28 @@ static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), might_sleep(); mutex_lock(&nf_conntrack_mutex); - while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) { + while ((ct = get_next_corpse(iter, iter_data, &bucket)) != NULL) { /* Time to push up daises... */ - nf_ct_delete(ct, portid, report); + nf_ct_delete(ct, iter_data->portid, iter_data->report); nf_ct_put(ct); cond_resched(); } mutex_unlock(&nf_conntrack_mutex); } -struct iter_data { - int (*iter)(struct nf_conn *i, void *data); - void *data; - struct net *net; -}; - -static int iter_net_only(struct nf_conn *i, void *data) -{ - struct iter_data *d = data; - - if (!net_eq(d->net, nf_ct_net(i))) - return 0; - - return d->iter(i, d->data); -} - -void nf_ct_iterate_cleanup_net(struct net *net, - int (*iter)(struct nf_conn *i, void *data), - void *data, u32 portid, int report) +void nf_ct_iterate_cleanup_net(int (*iter)(struct nf_conn *i, void *data), + const struct nf_ct_iter_data *iter_data) { + struct net *net = iter_data->net; struct nf_conntrack_net *cnet = nf_ct_pernet(net); - struct iter_data d; might_sleep(); if (atomic_read(&cnet->count) == 0) return; - d.iter = iter; - d.data = data; - d.net = net; - - nf_ct_iterate_cleanup(iter_net_only, &d, portid, report); + nf_ct_iterate_cleanup(iter, iter_data); } EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net); @@ -2451,6 +2435,7 @@ EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net); void nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data) { + struct nf_ct_iter_data iter_data = {}; struct net *net; down_read(&net_rwsem); @@ -2478,7 +2463,8 @@ nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data) synchronize_net(); nf_ct_ext_bump_genid(); - nf_ct_iterate_cleanup(iter, data, 0, 0); + iter_data.data = data; + nf_ct_iterate_cleanup(iter, &iter_data); /* Another cpu might be in a rcu read section with * rcu protected pointer cleared in iter callback @@ -2492,7 +2478,7 @@ EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy); static int kill_all(struct nf_conn *i, void *data) { - return net_eq(nf_ct_net(i), data); + return 1; } void nf_conntrack_cleanup_start(void) @@ -2527,8 +2513,9 @@ void nf_conntrack_cleanup_net(struct net *net) void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list) { - int busy; + struct nf_ct_iter_data iter_data = {}; struct net *net; + int busy; /* * This makes sure all current packets have passed through @@ -2541,7 +2528,8 @@ i_see_dead_people: list_for_each_entry(net, net_exit_list, exit_list) { struct nf_conntrack_net *cnet = nf_ct_pernet(net); - nf_ct_iterate_cleanup(kill_all, net, 0, 0); + iter_data.net = net; + nf_ct_iterate_cleanup_net(kill_all, &iter_data); if (atomic_read(&cnet->count) != 0) busy = 1; } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index eafe640b3387..e768f59741a6 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1559,6 +1559,11 @@ static int ctnetlink_flush_conntrack(struct net *net, u32 portid, int report, u8 family) { 
struct ctnetlink_filter *filter = NULL; + struct nf_ct_iter_data iter = { + .net = net, + .portid = portid, + .report = report, + }; if (ctnetlink_needs_filter(family, cda)) { if (cda[CTA_FILTER]) @@ -1567,10 +1572,11 @@ static int ctnetlink_flush_conntrack(struct net *net, filter = ctnetlink_alloc_filter(cda, family); if (IS_ERR(filter)) return PTR_ERR(filter); + + iter.data = filter; } - nf_ct_iterate_cleanup_net(net, ctnetlink_flush_iterate, filter, - portid, report); + nf_ct_iterate_cleanup_net(ctnetlink_flush_iterate, &iter); kfree(filter); return 0; diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index d1f2d3c8d2b1..895b09cbd7cf 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -538,9 +538,13 @@ retry: out_unlock: mutex_unlock(&nf_ct_proto_mutex); - if (fixup_needed) - nf_ct_iterate_cleanup_net(net, nf_ct_tcp_fixup, - (void *)(unsigned long)nfproto, 0, 0); + if (fixup_needed) { + struct nf_ct_iter_data iter_data = { + .net = net, + .data = (void *)(unsigned long)nfproto, + }; + nf_ct_iterate_cleanup_net(nf_ct_tcp_fixup, &iter_data); + } return err; } diff --git a/net/netfilter/nf_conntrack_timeout.c b/net/netfilter/nf_conntrack_timeout.c index cec166ecba77..0f828d05ea60 100644 --- a/net/netfilter/nf_conntrack_timeout.c +++ b/net/netfilter/nf_conntrack_timeout.c @@ -38,7 +38,12 @@ static int untimeout(struct nf_conn *ct, void *timeout) void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout) { - nf_ct_iterate_cleanup_net(net, untimeout, timeout, 0, 0); + struct nf_ct_iter_data iter_data = { + .net = net, + .data = timeout, + }; + + nf_ct_iterate_cleanup_net(untimeout, &iter_data); } EXPORT_SYMBOL_GPL(nf_ct_untimeout); diff --git a/net/netfilter/nf_nat_masquerade.c b/net/netfilter/nf_nat_masquerade.c index e32fac374608..1a506b0c6511 100644 --- a/net/netfilter/nf_nat_masquerade.c +++ b/net/netfilter/nf_nat_masquerade.c @@ -77,11 +77,14 @@ EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4); static void iterate_cleanup_work(struct work_struct *work) { + struct nf_ct_iter_data iter_data = {}; struct masq_dev_work *w; w = container_of(work, struct masq_dev_work, work); - nf_ct_iterate_cleanup_net(w->net, w->iter, (void *)w, 0, 0); + iter_data.net = w->net; + iter_data.data = (void *)w; + nf_ct_iterate_cleanup_net(w->iter, &iter_data); put_net_track(w->net, &w->ns_tracker); kfree(w); From 2794cdb0b97bfe62d25c996c8afe4832207e78bc Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 25 Apr 2022 15:15:41 +0200 Subject: [PATCH 12/17] netfilter: nfnetlink: allow to detect if ctnetlink listeners exist At this time, every new conntrack gets the 'event cache extension' enabled for it. This is because the 'net.netfilter.nf_conntrack_events' sysctl defaults to 1. Changing the default to 0 means that commands that rely on the event notification extension, e.g. 'conntrack -E' or conntrackd, stop working. We COULD detect if there is a listener by means of 'nfnetlink_has_listeners()' and only add the extension if this is true. The downside is a dependency from conntrack module to nfnetlink module. This adds a different way: inc/dec a counter whenever a ctnetlink group is being (un)subscribed and toggle a flag in struct net. Next patches will take advantage of this and will only add the event extension if the flag is set. 
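The consumer side is then a plain read of the per-netns flag, with no
dependency on nfnetlink symbols; a sketch of the intended check (the
helper name here is illustrative, not part of this patch):

	static inline bool ctnetlink_has_listener(const struct net *net)
	{
		/* flipped under the CTNETLINK subsys mutex in bind/unbind */
		return READ_ONCE(net->ct.ctnetlink_has_listener);
	}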
Signed-off-by: Florian Westphal
Signed-off-by: Pablo Neira Ayuso
---
 include/net/netns/conntrack.h |  1 +
 net/netfilter/nfnetlink.c     | 40 ++++++++++++++++++++++++++++++++---
 2 files changed, 38 insertions(+), 3 deletions(-)

diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index a71cfd4e2f21..0677cd3de034 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -95,6 +95,7 @@ struct nf_ip_net {
 struct netns_ct {
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
+	bool ctnetlink_has_listener;
 	bool ecache_dwork_pending;
 #endif
 	u8 sysctl_log_invalid; /* Log invalid packets */
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 7e2c8dd01408..ad3bbe34ca88 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -45,6 +45,7 @@ MODULE_DESCRIPTION("Netfilter messages via netlink socket");
 static unsigned int nfnetlink_pernet_id __read_mostly;
 
 struct nfnl_net {
+	unsigned int ctnetlink_listeners;
 	struct sock *nfnl;
 };
 
@@ -654,7 +655,6 @@ static void nfnetlink_rcv(struct sk_buff *skb)
 		netlink_rcv_skb(skb, nfnetlink_rcv_msg);
 }
 
-#ifdef CONFIG_MODULES
 static int nfnetlink_bind(struct net *net, int group)
 {
 	const struct nfnetlink_subsystem *ss;
@@ -670,9 +670,44 @@ static int nfnetlink_bind(struct net *net, int group)
 	rcu_read_unlock();
 	if (!ss)
 		request_module_nowait("nfnetlink-subsys-%d", type);
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+	if (type == NFNL_SUBSYS_CTNETLINK) {
+		struct nfnl_net *nfnlnet = nfnl_pernet(net);
+
+		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
+
+		if (WARN_ON_ONCE(nfnlnet->ctnetlink_listeners == UINT_MAX)) {
+			nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
+			return -EOVERFLOW;
+		}
+
+		nfnlnet->ctnetlink_listeners++;
+		if (nfnlnet->ctnetlink_listeners == 1)
+			WRITE_ONCE(net->ct.ctnetlink_has_listener, true);
+		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
+	}
+#endif
 	return 0;
 }
+
+static void nfnetlink_unbind(struct net *net, int group)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+	int type = nfnl_group2type[group];
+
+	if (type == NFNL_SUBSYS_CTNETLINK) {
+		struct nfnl_net *nfnlnet = nfnl_pernet(net);
+
+		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
+		WARN_ON_ONCE(nfnlnet->ctnetlink_listeners == 0);
+		nfnlnet->ctnetlink_listeners--;
+		if (nfnlnet->ctnetlink_listeners == 0)
+			WRITE_ONCE(net->ct.ctnetlink_has_listener, false);
+		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
+	}
 #endif
+}
 
 static int __net_init nfnetlink_net_init(struct net *net)
 {
@@ -680,9 +715,8 @@ static int __net_init nfnetlink_net_init(struct net *net)
 	struct netlink_kernel_cfg cfg = {
 		.groups	= NFNLGRP_MAX,
 		.input	= nfnetlink_rcv,
-#ifdef CONFIG_MODULES
 		.bind	= nfnetlink_bind,
-#endif
+		.unbind	= nfnetlink_unbind,
 	};
 
 	nfnlnet->nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);

From b0a7ab4a776583b4344e8313638dc795f7589209 Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Mon, 25 Apr 2022 15:15:42 +0200
Subject: [PATCH 13/17] netfilter: conntrack: un-inline nf_ct_ecache_ext_add

Only called when a new ct is allocated or the extension isn't present.
This function will be extended, so place it in the conntrack module
instead of inlining.  The callers already depend on the nf_conntrack
module.  The return value is changed to bool; no one used the returned
pointer.

Make sure that the core drops the newly allocated conntrack if the
extension is requested but can't be added.  This makes it necessary to
ifdef the section: because the stub always returns false, we'd drop
every new conntrack if the ecache extension is disabled in kconfig.

Addition from the data path (xt_CT, nft_ct) is unchanged.
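To illustrate why the call site itself must sit inside #ifdef
CONFIG_NF_CONNTRACK_EVENTS, here is a stand-alone toy model (not kernel
code) of the stub behaviour described above:

  #include <stdbool.h>
  #include <stdio.h>

  #ifdef CONFIG_NF_CONNTRACK_EVENTS
  static bool ecache_ext_add(void) { return true; /* allocation elided */ }
  #else
  /* mirrors the !CONFIG_NF_CONNTRACK_EVENTS stub: always reports failure */
  static bool ecache_ext_add(void) { return false; }
  #endif

  int main(void)
  {
  #ifdef CONFIG_NF_CONNTRACK_EVENTS
  	/* guarded call: only kernels with event support can fail here */
  	if (!ecache_ext_add()) {
  		puts("allocation failed, drop the new conntrack");
  		return 1;
  	}
  #endif
  	/* without the guard, the stub's 'false' would drop every new ct */
  	puts("conntrack created");
  	return 0;
  }

Compile with and without -DCONFIG_NF_CONNTRACK_EVENTS to see both
behaviours.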
Signed-off-by: Florian Westphal
Signed-off-by: Pablo Neira Ayuso
---
 include/net/netfilter/nf_conntrack_ecache.h | 30 ++++-----------------
 net/netfilter/nf_conntrack_core.c           | 14 +++++++---
 net/netfilter/nf_conntrack_ecache.c         | 22 +++++++++++++++
 3 files changed, 38 insertions(+), 28 deletions(-)

diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index b57d73785e4d..2e3d58439e34 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -36,31 +36,6 @@ nf_ct_ecache_find(const struct nf_conn *ct)
 #endif
 }
 
-static inline struct nf_conntrack_ecache *
-nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
-{
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-	struct net *net = nf_ct_net(ct);
-	struct nf_conntrack_ecache *e;
-
-	if (!ctmask && !expmask && net->ct.sysctl_events) {
-		ctmask = ~0;
-		expmask = ~0;
-	}
-	if (!ctmask && !expmask)
-		return NULL;
-
-	e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
-	if (e) {
-		e->ctmask = ctmask;
-		e->expmask = expmask;
-	}
-	return e;
-#else
-	return NULL;
-#endif
-}
-
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
 
 /* This structure is passed to event handler */
@@ -89,6 +64,7 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct);
 int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
 				  u32 portid, int report);
 
+bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp);
 #else
 
 static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct)
@@ -103,6 +79,10 @@ static inline int nf_conntrack_eventmask_report(unsigned int eventmask,
 	return 0;
 }
 
+static inline bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
+{
+	return false;
+}
 #endif
 
 static inline void
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index efbfd67d5c3d..7b078ec1f923 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1698,7 +1698,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 	struct nf_conn *ct;
 	struct nf_conn_help *help;
 	struct nf_conntrack_tuple repl_tuple;
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
 	struct nf_conntrack_ecache *ecache;
+#endif
 	struct nf_conntrack_expect *exp = NULL;
 	const struct nf_conntrack_zone *zone;
 	struct nf_conn_timeout *timeout_ext;
@@ -1731,10 +1733,16 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
 	nf_ct_labels_ext_add(ct);
 
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
 	ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
-	nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
-			     ecache ? ecache->expmask : 0,
-			     GFP_ATOMIC);
+
+	if (!nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
+				  ecache ? ecache->expmask : 0,
+				  GFP_ATOMIC)) {
+		nf_conntrack_free(ct);
+		return ERR_PTR(-ENOMEM);
+	}
+#endif
 
 	cnet = nf_ct_pernet(net);
 	if (cnet->expect_count) {
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 7472c544642f..2f0b52fdcbfa 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -297,6 +297,28 @@ void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state)
 	}
 }
 
+bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
+{
+	struct net *net = nf_ct_net(ct);
+	struct nf_conntrack_ecache *e;
+
+	if (!ctmask && !expmask && net->ct.sysctl_events) {
+		ctmask = ~0;
+		expmask = ~0;
+	}
+	if (!ctmask && !expmask)
+		return false;
+
+	e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
+	if (e) {
+		e->ctmask = ctmask;
+		e->expmask = expmask;
+	}
+
+	return e != NULL;
+}
+EXPORT_SYMBOL_GPL(nf_ct_ecache_ext_add);
+
 #define NF_CT_EVENTS_DEFAULT 1
 static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;

From 90d1daa45849f272b701f29d6ca88b24743c7553 Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Mon, 25 Apr 2022 15:15:43 +0200
Subject: [PATCH 14/17] netfilter: conntrack: add nf_conntrack_events autodetect mode

This adds the new nf_conntrack_events=2 mode and makes it the default.

It leverages the flag introduced earlier in struct net to avoid
allocating the event extension as long as no event listener is active
in the namespace.  For most cases this avoids allocation of the
ct->ext area.

A followup patch will take further advantage of this by avoiding
calls down into the event framework if the extension isn't present.

Signed-off-by: Florian Westphal
Signed-off-by: Pablo Neira Ayuso
---
 .../networking/nf_conntrack-sysctl.rst  |  5 +++-
 net/netfilter/nf_conntrack_core.c       |  3 ++-
 net/netfilter/nf_conntrack_ecache.c     | 27 ++++++++++++++-----
 net/netfilter/nf_conntrack_standalone.c |  2 +-
 4 files changed, 28 insertions(+), 9 deletions(-)

diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst
index 311128abb768..834945ebc4cd 100644
--- a/Documentation/networking/nf_conntrack-sysctl.rst
+++ b/Documentation/networking/nf_conntrack-sysctl.rst
@@ -34,10 +34,13 @@ nf_conntrack_count - INTEGER (read-only)
 
 nf_conntrack_events - BOOLEAN
 	- 0 - disabled
-	- not 0 - enabled (default)
+	- 1 - enabled
+	- 2 - auto (default)
 
 	If this option is enabled, the connection tracking code will
 	provide userspace with connection tracking events via ctnetlink.
+	The default allocates the extension if a userspace program is
+	listening to ctnetlink events.
 
 nf_conntrack_expect_max - INTEGER
 	Maximum size of expectation table.  Default value is
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 7b078ec1f923..082a2fd8d85b 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1736,7 +1736,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
 	ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
 
-	if (!nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
+	if ((ecache || net->ct.sysctl_events) &&
+	    !nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
 				  ecache ? ecache->expmask : 0,
 				  GFP_ATOMIC)) {
 		nf_conntrack_free(ct);
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 2f0b52fdcbfa..8698b3424646 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -302,12 +302,27 @@ bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp
 	struct net *net = nf_ct_net(ct);
 	struct nf_conntrack_ecache *e;
 
-	if (!ctmask && !expmask && net->ct.sysctl_events) {
-		ctmask = ~0;
-		expmask = ~0;
+	switch (net->ct.sysctl_events) {
+	case 0:
+		 /* assignment via template / ruleset? ignore sysctl. */
+		if (ctmask || expmask)
+			break;
+		return true;
+	case 2: /* autodetect: no event listener, don't allocate extension. */
+		if (!READ_ONCE(net->ct.ctnetlink_has_listener))
+			return true;
+		fallthrough;
+	case 1:
+		/* always allocate an extension. */
+		if (!ctmask && !expmask) {
+			ctmask = ~0;
+			expmask = ~0;
+		}
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		return true;
 	}
-	if (!ctmask && !expmask)
-		return false;
 
 	e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
 	if (e) {
@@ -319,7 +334,7 @@ bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp
 }
 EXPORT_SYMBOL_GPL(nf_ct_ecache_ext_add);
 
-#define NF_CT_EVENTS_DEFAULT 1
+#define NF_CT_EVENTS_DEFAULT 2
 static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
 
 void nf_conntrack_ecache_pernet_init(struct net *net)
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 3e1afd10a9b6..948884deaca5 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -693,7 +693,7 @@ static struct ctl_table nf_ct_sysctl_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dou8vec_minmax,
 		.extra1		= SYSCTL_ZERO,
-		.extra2		= SYSCTL_ONE,
+		.extra2		= SYSCTL_TWO,
 	},
 #endif
 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP

From 8edc813111001e9be3cce066d3d4091d2ef04a1d Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Mon, 25 Apr 2022 15:15:44 +0200
Subject: [PATCH 15/17] netfilter: prefer extension check to pointer check

The pointer check usually results in a 'false positive': it is likely
that the ctnetlink module is loaded but no event monitoring is enabled.

After the recent change to autodetect ctnetlink usage and only allocate
the ecache extension if a listener is active, check whether the
extension is present on a given conntrack.

If it is not there, there is nothing to report and the calls into the
notification framework can be elided.
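A stand-alone toy model of the difference (not kernel code; the names
are made up): the per-netns callback pointer is non-NULL as soon as
ctnetlink is loaded, while the extension exists only on conntracks that
were allocated while a listener was active:

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  struct toy_ct { void *ecache_ext; };	/* per-conntrack extension */

  static void *event_cb = (void *)1;	/* "ctnetlink module is loaded" */

  static bool old_check(const struct toy_ct *ct)
  {
  	(void)ct;
  	return event_cb != NULL;	/* true whenever the module is loaded */
  }

  static bool new_check(const struct toy_ct *ct)
  {
  	return ct->ecache_ext != NULL;	/* true only if the ext was allocated */
  }

  int main(void)
  {
  	/* conntrack allocated while nobody was listening */
  	struct toy_ct quiet = { .ecache_ext = NULL };

  	/* old: 1 (false positive, event path entered for nothing);
  	 * new: 0 (call elided)
  	 */
  	printf("old=%d new=%d\n", old_check(&quiet), new_check(&quiet));
  	return 0;
  }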
Signed-off-by: Florian Westphal
Signed-off-by: Pablo Neira Ayuso
---
 include/net/netfilter/nf_conntrack_core.h   |  2 +-
 include/net/netfilter/nf_conntrack_ecache.h | 31 ++++++++++-----------
 2 files changed, 16 insertions(+), 17 deletions(-)

diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 13807ea94cd2..6406cfee34c2 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -60,7 +60,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
 	if (ct) {
 		if (!nf_ct_is_confirmed(ct))
 			ret = __nf_conntrack_confirm(skb);
-		if (likely(ret == NF_ACCEPT))
+		if (ret == NF_ACCEPT && nf_ct_ecache_exist(ct))
 			nf_ct_deliver_cached_events(ct);
 	}
 	return ret;
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 2e3d58439e34..0c1dac318e02 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -36,6 +36,15 @@ nf_ct_ecache_find(const struct nf_conn *ct)
 #endif
 }
 
+static inline bool nf_ct_ecache_exist(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+	return nf_ct_ext_exist(ct, NF_CT_EXT_ECACHE);
+#else
+	return false;
+#endif
+}
+
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
 
 /* This structure is passed to event handler */
@@ -108,30 +117,20 @@ nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
 			  u32 portid, int report)
 {
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
-	const struct net *net = nf_ct_net(ct);
-
-	if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
-		return 0;
-
-	return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
-#else
-	return 0;
+	if (nf_ct_ecache_exist(ct))
+		return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
 #endif
+	return 0;
 }
 
 static inline int
 nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
 {
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
-	const struct net *net = nf_ct_net(ct);
-
-	if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
-		return 0;
-
-	return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
-#else
-	return 0;
+	if (nf_ct_ecache_exist(ct))
+		return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
 #endif
+	return 0;
 }
 
 #ifdef CONFIG_NF_CONNTRACK_EVENTS

From 3412e16418286bdc12561827cbd22f94cb8af5e1 Mon Sep 17 00:00:00 2001
From: Sven Auhagen
Date: Wed, 27 Apr 2022 09:15:15 +0200
Subject: [PATCH 16/17] netfilter: flowtable: nft_flow_route use more data for
 reverse route

When creating a flow table entry, the reverse route is looked up based
on the current packet.  There can be scenarios where the user creates a
custom ip rule to route the traffic differently.

In order to support those scenarios, the route lookup needs to use more
information from the current packet.  The patch adds several new fields
to the route lookup.
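The effect is observable from userspace: with a fib rule such as
'ip rule add fwmark 1 table 100' installed, a route lookup keyed with
the mark can resolve differently.  Below is a minimal sketch (assumed
destination address and mark value; not part of the patch) that performs
such a lookup via RTM_GETROUTE, the netlink equivalent of
'ip route get 192.0.2.1 mark 1':

  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <arpa/inet.h>
  #include <sys/socket.h>
  #include <linux/netlink.h>
  #include <linux/rtnetlink.h>

  int main(void)
  {
  	struct {
  		struct nlmsghdr nh;
  		struct rtmsg rt;
  		char attrs[64];
  	} req;
  	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
  	unsigned int mark = 1;	/* hypothetical fwmark used by an ip rule */
  	struct rtattr *rta;
  	struct in_addr dst;
  	char buf[4096];
  	int fd;

  	memset(&req, 0, sizeof(req));
  	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
  	req.nh.nlmsg_type = RTM_GETROUTE;
  	req.nh.nlmsg_flags = NLM_F_REQUEST;
  	req.rt.rtm_family = AF_INET;
  	req.rt.rtm_dst_len = 32;

  	inet_pton(AF_INET, "192.0.2.1", &dst);	/* example destination */

  	/* RTA_DST: destination address, as before */
  	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
  	rta->rta_type = RTA_DST;
  	rta->rta_len = RTA_LENGTH(sizeof(dst));
  	memcpy(RTA_DATA(rta), &dst, sizeof(dst));
  	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + rta->rta_len;

  	/* RTA_MARK: skb->mark, one of the keys the patch adds */
  	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
  	rta->rta_type = RTA_MARK;
  	rta->rta_len = RTA_LENGTH(sizeof(mark));
  	memcpy(RTA_DATA(rta), &mark, sizeof(mark));
  	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + rta->rta_len;

  	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
  	if (fd < 0)
  		return 1;
  	if (sendto(fd, &req, req.nh.nlmsg_len, 0,
  		   (struct sockaddr *)&sa, sizeof(sa)) < 0 ||
  	    recv(fd, buf, sizeof(buf), 0) < 0)
  		return 1;

  	puts("got a route reply; rerun without RTA_MARK and compare");
  	close(fd);
  	return 0;
  }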
Signed-off-by: Sven Auhagen
Signed-off-by: Pablo Neira Ayuso
---
 net/netfilter/nft_flow_offload.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 900d48c810a1..50991ab1e2d2 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -227,11 +227,19 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
 	switch (nft_pf(pkt)) {
 	case NFPROTO_IPV4:
 		fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
+		fl.u.ip4.saddr = ct->tuplehash[dir].tuple.dst.u3.ip;
 		fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
+		fl.u.ip4.flowi4_iif = this_dst->dev->ifindex;
+		fl.u.ip4.flowi4_tos = RT_TOS(ip_hdr(pkt->skb)->tos);
+		fl.u.ip4.flowi4_mark = pkt->skb->mark;
 		break;
 	case NFPROTO_IPV6:
 		fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
+		fl.u.ip6.saddr = ct->tuplehash[dir].tuple.dst.u3.in6;
 		fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
+		fl.u.ip6.flowi6_iif = this_dst->dev->ifindex;
+		fl.u.ip6.flowlabel = ip6_flowinfo(ipv6_hdr(pkt->skb));
+		fl.u.ip6.flowi6_mark = pkt->skb->mark;
 		break;
 	}

From 4f9bd53084d18c2f9f1ec68fa56587b99a2cef00 Mon Sep 17 00:00:00 2001
From: Kevin Mitchell
Date: Fri, 29 Apr 2022 20:40:27 -0700
Subject: [PATCH 17/17] netfilter: conntrack: skip verification of zero UDP
 checksum

The checksum is optional for UDP packets.  However, nf_reject would
previously require a valid checksum to elicit a response such as
ICMP_DEST_UNREACH.

Add some logic to nf_reject_verify_csum to determine whether a UDP
packet has a zero checksum and should therefore not be verified.

Signed-off-by: Kevin Mitchell
Signed-off-by: Pablo Neira Ayuso
---
 include/net/netfilter/nf_reject.h   | 21 +++++++++++++++++----
 net/ipv4/netfilter/nf_reject_ipv4.c | 10 +++++++---
 net/ipv6/netfilter/nf_reject_ipv6.c |  4 ++--
 3 files changed, 26 insertions(+), 9 deletions(-)

diff --git a/include/net/netfilter/nf_reject.h b/include/net/netfilter/nf_reject.h
index 9051c3a0c8e7..7c669792fb9c 100644
--- a/include/net/netfilter/nf_reject.h
+++ b/include/net/netfilter/nf_reject.h
@@ -5,12 +5,28 @@
 #include
 #include
 
-static inline bool nf_reject_verify_csum(__u8 proto)
+static inline bool nf_reject_verify_csum(struct sk_buff *skb, int dataoff,
+					 __u8 proto)
 {
 	/* Skip protocols that don't use 16-bit one's complement checksum
 	 * of the entire payload.
 	 */
 	switch (proto) {
+	/* Protocols with optional checksums. */
+	case IPPROTO_UDP: {
+		const struct udphdr *udp_hdr;
+		struct udphdr _udp_hdr;
+
+		udp_hdr = skb_header_pointer(skb, dataoff,
+					     sizeof(_udp_hdr),
+					     &_udp_hdr);
+		if (!udp_hdr || udp_hdr->check)
+			return true;
+
+		return false;
+	}
+	case IPPROTO_GRE:
+
 	/* Protocols with other integrity checks. */
 	case IPPROTO_AH:
 	case IPPROTO_ESP:
@@ -19,9 +35,6 @@ static inline bool nf_reject_verify_csum(__u8 proto)
 	/* Protocols with partial checksums. */
 	case IPPROTO_UDPLITE:
 	case IPPROTO_DCCP:
-
-	/* Protocols with optional checksums. */
-	case IPPROTO_GRE:
 		return false;
 	}
 	return true;
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 4eed5afca392..918c61fda0f3 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -80,6 +80,7 @@ struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
 	struct iphdr *niph;
 	struct icmphdr *icmph;
 	unsigned int len;
+	int dataoff;
 	__wsum csum;
 	u8 proto;
 
@@ -99,10 +100,11 @@ struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
 	if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
 		return NULL;
 
+	dataoff = ip_hdrlen(oldskb);
 	proto = ip_hdr(oldskb)->protocol;
 
 	if (!skb_csum_unnecessary(oldskb) &&
-	    nf_reject_verify_csum(proto) &&
+	    nf_reject_verify_csum(oldskb, dataoff, proto) &&
 	    nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
 		return NULL;
 
@@ -311,6 +313,7 @@ EXPORT_SYMBOL_GPL(nf_send_reset);
 void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
 {
 	struct iphdr *iph = ip_hdr(skb_in);
+	int dataoff = ip_hdrlen(skb_in);
 	u8 proto = iph->protocol;
 
 	if (iph->frag_off & htons(IP_OFFSET))
@@ -320,12 +323,13 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
 	    nf_reject_fill_skb_dst(skb_in) < 0)
 		return;
 
-	if (skb_csum_unnecessary(skb_in) || !nf_reject_verify_csum(proto)) {
+	if (skb_csum_unnecessary(skb_in) ||
+	    !nf_reject_verify_csum(skb_in, dataoff, proto)) {
 		icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
 		return;
 	}
 
-	if (nf_ip_checksum(skb_in, hook, ip_hdrlen(skb_in), proto) == 0)
+	if (nf_ip_checksum(skb_in, hook, dataoff, proto) == 0)
 		icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
 }
 EXPORT_SYMBOL_GPL(nf_send_unreach);
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index dffeaaaadcde..f61d4f18e1cf 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -31,7 +31,7 @@ static bool nf_reject_v6_csum_ok(struct sk_buff *skb, int hook)
 	if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
 		return false;
 
-	if (!nf_reject_verify_csum(proto))
+	if (!nf_reject_verify_csum(skb, thoff, proto))
 		return true;
 
 	return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
@@ -388,7 +388,7 @@ static bool reject6_csum_ok(struct sk_buff *skb, int hook)
 	if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
 		return false;
 
-	if (!nf_reject_verify_csum(proto))
+	if (!nf_reject_verify_csum(skb, thoff, proto))
 		return true;
 
 	return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
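For reference, the rule the new IPPROTO_UDP case encodes: a UDP checksum
field of zero on the wire means the sender computed no checksum (RFC 768,
for IPv4), so there is nothing to verify before sending the reject
response.  A stand-alone sketch of that decision, with a toy packet
layout (not kernel code):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  struct udp_hdr { uint16_t source, dest, len, check; };

  /* mirrors the new IPPROTO_UDP case: verify unless the checksum field
   * is zero; a truncated header is treated as "verify" too, matching
   * the !udp_hdr fallback above
   */
  static bool must_verify_csum(const uint8_t *pkt, size_t dataoff, size_t pktlen)
  {
  	struct udp_hdr uh;

  	if (pktlen < dataoff + sizeof(uh))
  		return true;
  	memcpy(&uh, pkt + dataoff, sizeof(uh));
  	return uh.check != 0;	/* zero on the wire: checksum not computed */
  }

  int main(void)
  {
  	uint8_t pkt[28] = { 0 };	/* toy 20-byte IPv4 header + 8-byte UDP */

  	printf("zero csum:    verify? %d\n", must_verify_csum(pkt, 20, sizeof(pkt)));
  	pkt[26] = 0xbe;			/* nonzero checksum field */
  	pkt[27] = 0xef;
  	printf("nonzero csum: verify? %d\n", must_verify_csum(pkt, 20, sizeof(pkt)));
  	return 0;
  }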