Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following patchset contains Netfilter updates for net-next:

1) Make the .br_defrag indirection depend on CONFIG_NF_DEFRAG_IPV6, from wenxu.

2) Remove unnecessary memset() in ipset, from Florent Fourcot.

3) Merge control plane addition and deletion in ipset, also from Florent.

4) A few missing checks on the nla_parse() return value in ipset, from
   Aditya Pakki and Jozsef Kadlecsik.

5) Fix incorrect cleanup in the error path of xt_set version 3, from Jozsef.

6) Fix memory accounting problems when resizing sets in ipset, from Stefano Brivio.

7) Jozsef updates his email address to @netfilter.org; this batch comes with a
   conflict resolution against the recent SPDX header updates.

8) Add support to create custom conntrack expectations via nftables, from
   Stephane Veyret.

9) A lookup optimization for conntrack, from Florian Westphal.

10) Check for supported flags in xt_owner.

11) Support for pernet sysctl in br_netfilter, patches
    from Christian Brauner.

12) Move common synproxy infrastructure to nf_synproxy.c to prepare the
    synproxy support for nf_tables, patches from
    Fernando Fernandez Mancera.

13) Support to restore the expiration time of set elements, from Laura Garcia.

14) Fix recent rewrite of netfilter IPv6 to avoid indirections
    when CONFIG_IPV6 is unset, from Arnd Bergmann.

15) Always reset the VLAN tag on the skbuff fraglist when refragmenting in
    bridge conntrack, from wenxu.

16) Support to match IPv4 options in nf_tables, from Stephen Suryaputra.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 27d92807f8
David S. Miller, 2019-06-25 12:42:12 -07:00
58 changed files with 1611 additions and 1127 deletions


@ -1800,7 +1800,7 @@ S: 2300 Copenhagen S.
S: Denmark
N: Jozsef Kadlecsik
E: kadlec@blackhole.kfki.hu
E: kadlec@netfilter.org
P: 1024D/470DB964 4CB3 1A05 713E 9BF7 FAC5 5809 DD8C B7B1 470D B964
D: netfilter: TCP window tracking code
D: netfilter: raw table


@ -10866,7 +10866,7 @@ F: drivers/net/ethernet/neterion/
NETFILTER
M: Pablo Neira Ayuso <pablo@netfilter.org>
M: Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
M: Jozsef Kadlecsik <kadlec@netfilter.org>
M: Florian Westphal <fw@strlen.de>
L: netfilter-devel@vger.kernel.org
L: coreteam@netfilter.org


@ -17,7 +17,7 @@
* if SELF_TEST is defined. You can use this free for any purpose. It's in
* the public domain. It has no warranty.
*
* Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
* Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@netfilter.org)
*
* I've modified Bob's hash to be useful in the Linux kernel, and
* any bugs present are my fault.


@ -2,7 +2,7 @@
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
* Patrick Schaaf <bof@bof.de>
* Martin Josefsson <gandalf@wlug.westbo.se>
* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
*/
#ifndef _IP_SET_H
#define _IP_SET_H


@ -2,8 +2,7 @@
#ifndef _IP_SET_COUNTER_H
#define _IP_SET_COUNTER_H
/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@netfilter.org> */
#ifdef __KERNEL__


@ -2,8 +2,7 @@
#ifndef _IP_SET_SKBINFO_H
#define _IP_SET_SKBINFO_H
/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Copyright (C) 2015 Jozsef Kadlecsik <kadlec@netfilter.org> */
#ifdef __KERNEL__


@ -2,8 +2,7 @@
#ifndef _IP_SET_TIMEOUT_H
#define _IP_SET_TIMEOUT_H
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */
#ifdef __KERNEL__


@ -8,6 +8,7 @@
#define __LINUX_IP6_NETFILTER_H
#include <uapi/linux/netfilter_ipv6.h>
#include <net/tcp.h>
/* Extra routing may needed on local out, as the QUEUE target never returns
* control to the table.
@ -35,6 +36,10 @@ struct nf_ipv6_ops {
struct in6_addr *saddr);
int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict);
u32 (*cookie_init_sequence)(const struct ipv6hdr *iph,
const struct tcphdr *th, u16 *mssp);
int (*cookie_v6_check)(const struct ipv6hdr *iph,
const struct tcphdr *th, __u32 cookie);
#endif
void (*route_input)(struct sk_buff *skb);
int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
@ -70,8 +75,10 @@ static inline int nf_ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
return 1;
return v6_ops->chk_addr(net, addr, dev, strict);
#else
#elif IS_BUILTIN(CONFIG_IPV6)
return ipv6_chk_addr(net, addr, dev, strict);
#else
return 1;
#endif
}
@ -108,8 +115,10 @@ static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb,
return 1;
return v6_ops->br_defrag(net, skb, user);
#else
#elif IS_BUILTIN(CONFIG_IPV6)
return nf_ct_frag6_gather(net, skb, user);
#else
return 1;
#endif
}
@ -133,8 +142,10 @@ static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk,
return 1;
return v6_ops->br_fragment(net, sk, skb, data, output);
#else
#elif IS_BUILTIN(CONFIG_IPV6)
return br_ip6_fragment(net, sk, skb, data, output);
#else
return 1;
#endif
}
@ -149,11 +160,46 @@ static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)
return -EHOSTUNREACH;
return v6_ops->route_me_harder(net, skb);
#else
#elif IS_BUILTIN(CONFIG_IPV6)
return ip6_route_me_harder(net, skb);
#else
return -EHOSTUNREACH;
#endif
}
static inline u32 nf_ipv6_cookie_init_sequence(const struct ipv6hdr *iph,
const struct tcphdr *th,
u16 *mssp)
{
#if IS_ENABLED(CONFIG_SYN_COOKIES)
#if IS_MODULE(CONFIG_IPV6)
const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
if (v6_ops)
return v6_ops->cookie_init_sequence(iph, th, mssp);
#elif IS_BUILTIN(CONFIG_IPV6)
return __cookie_v6_init_sequence(iph, th, mssp);
#endif
#endif
return 0;
}
static inline int nf_cookie_v6_check(const struct ipv6hdr *iph,
const struct tcphdr *th, __u32 cookie)
{
#if IS_ENABLED(CONFIG_SYN_COOKIES)
#if IS_MODULE(CONFIG_IPV6)
const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
if (v6_ops)
return v6_ops->cookie_v6_check(iph, th, cookie);
#elif IS_BUILTIN(CONFIG_IPV6)
return __cookie_v6_check(iph, th, cookie);
#endif
#endif
return 0;
}
__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
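The cookie helpers added to nf_ipv6_ops let the shared SYNPROXY code generate and validate IPv6 SYN cookies whether CONFIG_IPV6 is built-in, modular or disabled. A minimal caller sketch, not part of this patchset (function name is illustrative):

#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/netfilter_ipv6.h>

/* Return a SYN/ACK initial sequence number for an IPv6 SYN, or 0 when
 * IPv6 or SYN cookies are unavailable in this configuration.
 */
static u32 example_ipv6_synack_isn(const struct sk_buff *skb,
				   const struct tcphdr *th, u16 *mss)
{
	return nf_ipv6_cookie_init_sequence(ipv6_hdr(skb), th, mss);
}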


@ -42,7 +42,8 @@ static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
return port ? &port->br->fake_rtable : NULL;
}
struct net_device *setup_pre_routing(struct sk_buff *skb);
struct net_device *setup_pre_routing(struct sk_buff *skb,
const struct net *net);
#if IS_ENABLED(CONFIG_IPV6)
int br_validate_ipv6(struct net *net, struct sk_buff *skb);


@ -70,7 +70,8 @@ struct nf_conn {
struct nf_conntrack ct_general;
spinlock_t lock;
u16 cpu;
/* jiffies32 when this ct is considered dead */
u32 timeout;
#ifdef CONFIG_NF_CONNTRACK_ZONES
struct nf_conntrack_zone zone;
@ -82,9 +83,7 @@ struct nf_conn {
/* Have we seen traffic both ways yet? (bitset) */
unsigned long status;
/* jiffies32 when this ct is considered dead */
u32 timeout;
u16 cpu;
possible_net_t ct_net;
#if IS_ENABLED(CONFIG_NF_NAT)


@ -72,21 +72,12 @@ struct synproxy_options {
};
struct tcphdr;
struct xt_synproxy_info;
struct nf_synproxy_info;
bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
const struct tcphdr *th,
struct synproxy_options *opts);
unsigned int synproxy_options_size(const struct synproxy_options *opts);
void synproxy_build_options(struct tcphdr *th,
const struct synproxy_options *opts);
void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
void synproxy_init_timestamp_cookie(const struct nf_synproxy_info *info,
struct synproxy_options *opts);
void synproxy_check_timestamp_cookie(struct synproxy_options *opts);
unsigned int synproxy_tstamp_adjust(struct sk_buff *skb, unsigned int protoff,
struct tcphdr *th, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
const struct nf_conn_synproxy *synproxy);
#endif /* _NF_CONNTRACK_SYNPROXY_H */


@ -0,0 +1,44 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_SYNPROXY_SHARED_H
#define _NF_SYNPROXY_SHARED_H
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/tcp.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
void synproxy_send_client_synack(struct net *net, const struct sk_buff *skb,
const struct tcphdr *th,
const struct synproxy_options *opts);
bool synproxy_recv_client_ack(struct net *net,
const struct sk_buff *skb,
const struct tcphdr *th,
struct synproxy_options *opts, u32 recv_seq);
unsigned int ipv4_synproxy_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *nhs);
int nf_synproxy_ipv4_init(struct synproxy_net *snet, struct net *net);
void nf_synproxy_ipv4_fini(struct synproxy_net *snet, struct net *net);
#if IS_ENABLED(CONFIG_IPV6)
void synproxy_send_client_synack_ipv6(struct net *net,
const struct sk_buff *skb,
const struct tcphdr *th,
const struct synproxy_options *opts);
bool synproxy_recv_client_ack_ipv6(struct net *net, const struct sk_buff *skb,
const struct tcphdr *th,
struct synproxy_options *opts, u32 recv_seq);
unsigned int ipv6_synproxy_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *nhs);
int nf_synproxy_ipv6_init(struct synproxy_net *snet, struct net *net);
void nf_synproxy_ipv6_fini(struct synproxy_net *snet, struct net *net);
#endif /* CONFIG_IPV6 */
#endif /* _NF_SYNPROXY_SHARED_H */


@ -636,7 +636,7 @@ static inline struct nft_object **nft_set_ext_obj(const struct nft_set_ext *ext)
void *nft_set_elem_init(const struct nft_set *set,
const struct nft_set_ext_tmpl *tmpl,
const u32 *key, const u32 *data,
u64 timeout, gfp_t gfp);
u64 timeout, u64 expiration, gfp_t gfp);
void nft_set_elem_destroy(const struct nft_set *set, void *elem,
bool destroy_expr);
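With the extra expiration argument, a dumped element can be re-added with its remaining lifetime instead of restarting from the full timeout; passing 0 keeps the old behaviour. An illustrative caller, with all parameters assumed to come from netlink parsing:

#include <net/netfilter/nf_tables.h>

/* Sketch only: allocate an element that keeps 'expiration' of its previous
 * lifetime (0 means start a fresh 'timeout' period).
 */
static void *example_restore_elem(const struct nft_set *set,
				  const struct nft_set_ext_tmpl *tmpl,
				  const u32 *key, const u32 *data,
				  u64 timeout, u64 expiration)
{
	return nft_set_elem_init(set, tmpl, key, data,
				 timeout, expiration, GFP_KERNEL);
}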


@ -2,7 +2,7 @@
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
* Patrick Schaaf <bof@bof.de>
* Martin Josefsson <gandalf@wlug.westbo.se>
* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as


@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_SYNPROXY_H
#define _NF_SYNPROXY_H
#include <linux/types.h>
#define NF_SYNPROXY_OPT_MSS 0x01
#define NF_SYNPROXY_OPT_WSCALE 0x02
#define NF_SYNPROXY_OPT_SACK_PERM 0x04
#define NF_SYNPROXY_OPT_TIMESTAMP 0x08
#define NF_SYNPROXY_OPT_ECN 0x10
struct nf_synproxy_info {
__u8 options;
__u8 wscale;
__u16 mss;
};
#endif /* _NF_SYNPROXY_H */
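This new shared UAPI header (included as <linux/netfilter/nf_SYNPROXY.h> by xt_SYNPROXY.h below) carries the option flags and nf_synproxy_info used by both the xtables target and the upcoming nf_tables support. A hedged configuration sketch; the values and function name are made up:

#include <linux/netfilter/nf_SYNPROXY.h>

/* Request MSS, window-scale and SACK-permitted handling from SYNPROXY. */
static void example_synproxy_config(struct nf_synproxy_info *info)
{
	info->options = NF_SYNPROXY_OPT_MSS |
			NF_SYNPROXY_OPT_WSCALE |
			NF_SYNPROXY_OPT_SACK_PERM;
	info->mss = 1460;	/* example values only */
	info->wscale = 7;
}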


@ -730,10 +730,12 @@ enum nft_exthdr_flags {
*
* @NFT_EXTHDR_OP_IPV6: match against ipv6 extension headers
* @NFT_EXTHDR_OP_TCP: match against tcp options
* @NFT_EXTHDR_OP_IPV4: match against ipv4 options
*/
enum nft_exthdr_op {
NFT_EXTHDR_OP_IPV6,
NFT_EXTHDR_OP_TCPOPT,
NFT_EXTHDR_OP_IPV4,
__NFT_EXTHDR_OP_MAX
};
#define NFT_EXTHDR_OP_MAX (__NFT_EXTHDR_OP_MAX - 1)
@ -1445,6 +1447,17 @@ enum nft_ct_timeout_timeout_attributes {
};
#define NFTA_CT_TIMEOUT_MAX (__NFTA_CT_TIMEOUT_MAX - 1)
enum nft_ct_expectation_attributes {
NFTA_CT_EXPECT_UNSPEC,
NFTA_CT_EXPECT_L3PROTO,
NFTA_CT_EXPECT_L4PROTO,
NFTA_CT_EXPECT_DPORT,
NFTA_CT_EXPECT_TIMEOUT,
NFTA_CT_EXPECT_SIZE,
__NFTA_CT_EXPECT_MAX,
};
#define NFTA_CT_EXPECT_MAX (__NFTA_CT_EXPECT_MAX - 1)
#define NFT_OBJECT_UNSPEC 0
#define NFT_OBJECT_COUNTER 1
#define NFT_OBJECT_QUOTA 2
@ -1454,7 +1467,8 @@ enum nft_ct_timeout_timeout_attributes {
#define NFT_OBJECT_TUNNEL 6
#define NFT_OBJECT_CT_TIMEOUT 7
#define NFT_OBJECT_SECMARK 8
#define __NFT_OBJECT_MAX 9
#define NFT_OBJECT_CT_EXPECT 9
#define __NFT_OBJECT_MAX 10
#define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1)
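The NFTA_CT_EXPECT_* attributes describe the new "ct expectation" object type (NFT_OBJECT_CT_EXPECT). A plausible netlink policy for them is sketched below; the attribute types are an assumption here, the authoritative policy lives in nft_ct.c:

#include <net/netlink.h>
#include <net/netfilter/nf_tables.h>

static const struct nla_policy example_ct_expect_policy[NFTA_CT_EXPECT_MAX + 1] = {
	[NFTA_CT_EXPECT_L3PROTO]	= { .type = NLA_U16 },
	[NFTA_CT_EXPECT_L4PROTO]	= { .type = NLA_U8 },
	[NFTA_CT_EXPECT_DPORT]		= { .type = NLA_U16 },
	[NFTA_CT_EXPECT_TIMEOUT]	= { .type = NLA_U32 },
	[NFTA_CT_EXPECT_SIZE]		= { .type = NLA_U8 },
};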
/**


@ -2,18 +2,14 @@
#ifndef _XT_SYNPROXY_H
#define _XT_SYNPROXY_H
#include <linux/types.h>
#include <linux/netfilter/nf_SYNPROXY.h>
#define XT_SYNPROXY_OPT_MSS 0x01
#define XT_SYNPROXY_OPT_WSCALE 0x02
#define XT_SYNPROXY_OPT_SACK_PERM 0x04
#define XT_SYNPROXY_OPT_TIMESTAMP 0x08
#define XT_SYNPROXY_OPT_ECN 0x10
#define XT_SYNPROXY_OPT_MSS NF_SYNPROXY_OPT_MSS
#define XT_SYNPROXY_OPT_WSCALE NF_SYNPROXY_OPT_WSCALE
#define XT_SYNPROXY_OPT_SACK_PERM NF_SYNPROXY_OPT_SACK_PERM
#define XT_SYNPROXY_OPT_TIMESTAMP NF_SYNPROXY_OPT_TIMESTAMP
#define XT_SYNPROXY_OPT_ECN NF_SYNPROXY_OPT_ECN
struct xt_synproxy_info {
__u8 options;
__u8 wscale;
__u16 mss;
};
#define xt_synproxy_info nf_synproxy_info
#endif /* _XT_SYNPROXY_H */


@ -11,6 +11,11 @@ enum {
XT_OWNER_SUPPL_GROUPS = 1 << 3,
};
#define XT_OWNER_MASK (XT_OWNER_UID | \
XT_OWNER_GID | \
XT_OWNER_SOCKET | \
XT_OWNER_SUPPL_GROUPS)
struct xt_owner_match_info {
__u32 uid_min, uid_max;
__u32 gid_min, gid_max;
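XT_OWNER_MASK gives the owner match a way to reject flag bits it does not implement instead of silently ignoring them. A sketch of that validation (function name is illustrative; the real check lives in xt_owner.c):

#include <linux/errno.h>
#include <linux/netfilter/xt_owner.h>

/* Refuse rules whose match or invert field requests unknown flag bits. */
static int example_owner_check_flags(const struct xt_owner_match_info *info)
{
	if ((info->match | info->invert) & ~XT_OWNER_MASK)
		return -EINVAL;
	return 0;
}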


@ -47,25 +47,22 @@ static unsigned int brnf_net_id __read_mostly;
struct brnf_net {
bool enabled;
};
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly;
static int brnf_filter_pppoe_tagged __read_mostly;
static int brnf_pass_vlan_indev __read_mostly;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
#define brnf_pass_vlan_indev 0
struct ctl_table_header *ctl_hdr;
#endif
/* default value is 1 */
int call_iptables;
int call_ip6tables;
int call_arptables;
/* default value is 0 */
int filter_vlan_tagged;
int filter_pppoe_tagged;
int pass_vlan_indev;
};
#define IS_IP(skb) \
(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
@ -85,17 +82,28 @@ static inline __be16 vlan_proto(const struct sk_buff *skb)
return 0;
}
#define IS_VLAN_IP(skb) \
(vlan_proto(skb) == htons(ETH_P_IP) && \
brnf_filter_vlan_tagged)
static inline bool is_vlan_ip(const struct sk_buff *skb, const struct net *net)
{
struct brnf_net *brnet = net_generic(net, brnf_net_id);
#define IS_VLAN_IPV6(skb) \
(vlan_proto(skb) == htons(ETH_P_IPV6) && \
brnf_filter_vlan_tagged)
return vlan_proto(skb) == htons(ETH_P_IP) && brnet->filter_vlan_tagged;
}
#define IS_VLAN_ARP(skb) \
(vlan_proto(skb) == htons(ETH_P_ARP) && \
brnf_filter_vlan_tagged)
static inline bool is_vlan_ipv6(const struct sk_buff *skb,
const struct net *net)
{
struct brnf_net *brnet = net_generic(net, brnf_net_id);
return vlan_proto(skb) == htons(ETH_P_IPV6) &&
brnet->filter_vlan_tagged;
}
static inline bool is_vlan_arp(const struct sk_buff *skb, const struct net *net)
{
struct brnf_net *brnet = net_generic(net, brnf_net_id);
return vlan_proto(skb) == htons(ETH_P_ARP) && brnet->filter_vlan_tagged;
}
static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
@ -103,15 +111,23 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
sizeof(struct pppoe_hdr)));
}
#define IS_PPPOE_IP(skb) \
(skb->protocol == htons(ETH_P_PPP_SES) && \
pppoe_proto(skb) == htons(PPP_IP) && \
brnf_filter_pppoe_tagged)
static inline bool is_pppoe_ip(const struct sk_buff *skb, const struct net *net)
{
struct brnf_net *brnet = net_generic(net, brnf_net_id);
#define IS_PPPOE_IPV6(skb) \
(skb->protocol == htons(ETH_P_PPP_SES) && \
pppoe_proto(skb) == htons(PPP_IPV6) && \
brnf_filter_pppoe_tagged)
return skb->protocol == htons(ETH_P_PPP_SES) &&
pppoe_proto(skb) == htons(PPP_IP) && brnet->filter_pppoe_tagged;
}
static inline bool is_pppoe_ipv6(const struct sk_buff *skb,
const struct net *net)
{
struct brnf_net *brnet = net_generic(net, brnf_net_id);
return skb->protocol == htons(ETH_P_PPP_SES) &&
pppoe_proto(skb) == htons(PPP_IPV6) &&
brnet->filter_pppoe_tagged;
}
/* largest possible L2 header, see br_nf_dev_queue_xmit() */
#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
@ -408,12 +424,16 @@ bridged_dnat:
return 0;
}
static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev)
static struct net_device *brnf_get_logical_dev(struct sk_buff *skb,
const struct net_device *dev,
const struct net *net)
{
struct net_device *vlan, *br;
struct brnf_net *brnet = net_generic(net, brnf_net_id);
br = bridge_parent(dev);
if (brnf_pass_vlan_indev == 0 || !skb_vlan_tag_present(skb))
if (brnet->pass_vlan_indev == 0 || !skb_vlan_tag_present(skb))
return br;
vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
@ -423,7 +443,7 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct
}
/* Some common code for IPv4/IPv6 */
struct net_device *setup_pre_routing(struct sk_buff *skb)
struct net_device *setup_pre_routing(struct sk_buff *skb, const struct net *net)
{
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
@ -434,7 +454,7 @@ struct net_device *setup_pre_routing(struct sk_buff *skb)
nf_bridge->in_prerouting = 1;
nf_bridge->physindev = skb->dev;
skb->dev = brnf_get_logical_dev(skb, skb->dev);
skb->dev = brnf_get_logical_dev(skb, skb->dev, net);
if (skb->protocol == htons(ETH_P_8021Q))
nf_bridge->orig_proto = BRNF_PROTO_8021Q;
@ -460,6 +480,7 @@ static unsigned int br_nf_pre_routing(void *priv,
struct net_bridge_port *p;
struct net_bridge *br;
__u32 len = nf_bridge_encap_header_len(skb);
struct brnf_net *brnet;
if (unlikely(!pskb_may_pull(skb, len)))
return NF_DROP;
@ -469,8 +490,10 @@ static unsigned int br_nf_pre_routing(void *priv,
return NF_DROP;
br = p->br;
if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) {
if (!brnf_call_ip6tables &&
brnet = net_generic(state->net, brnf_net_id);
if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
is_pppoe_ipv6(skb, state->net)) {
if (!brnet->call_ip6tables &&
!br_opt_get(br, BROPT_NF_CALL_IP6TABLES))
return NF_ACCEPT;
@ -478,10 +501,11 @@ static unsigned int br_nf_pre_routing(void *priv,
return br_nf_pre_routing_ipv6(priv, skb, state);
}
if (!brnf_call_iptables && !br_opt_get(br, BROPT_NF_CALL_IPTABLES))
if (!brnet->call_iptables && !br_opt_get(br, BROPT_NF_CALL_IPTABLES))
return NF_ACCEPT;
if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb))
if (!IS_IP(skb) && !is_vlan_ip(skb, state->net) &&
!is_pppoe_ip(skb, state->net))
return NF_ACCEPT;
nf_bridge_pull_encap_header_rcsum(skb);
@ -491,7 +515,7 @@ static unsigned int br_nf_pre_routing(void *priv,
if (!nf_bridge_alloc(skb))
return NF_DROP;
if (!setup_pre_routing(skb))
if (!setup_pre_routing(skb, state->net))
return NF_DROP;
nf_bridge = nf_bridge_info_get(skb);
@ -514,7 +538,7 @@ static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
struct net_device *in;
if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
if (!IS_ARP(skb) && !is_vlan_arp(skb, net)) {
if (skb->protocol == htons(ETH_P_IP))
nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;
@ -569,9 +593,11 @@ static unsigned int br_nf_forward_ip(void *priv,
if (!parent)
return NF_DROP;
if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
if (IS_IP(skb) || is_vlan_ip(skb, state->net) ||
is_pppoe_ip(skb, state->net))
pf = NFPROTO_IPV4;
else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
else if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
is_pppoe_ipv6(skb, state->net))
pf = NFPROTO_IPV6;
else
return NF_ACCEPT;
@ -602,7 +628,7 @@ static unsigned int br_nf_forward_ip(void *priv,
skb->protocol = htons(ETH_P_IPV6);
NF_HOOK(pf, NF_INET_FORWARD, state->net, NULL, skb,
brnf_get_logical_dev(skb, state->in),
brnf_get_logical_dev(skb, state->in, state->net),
parent, br_nf_forward_finish);
return NF_STOLEN;
@ -615,23 +641,25 @@ static unsigned int br_nf_forward_arp(void *priv,
struct net_bridge_port *p;
struct net_bridge *br;
struct net_device **d = (struct net_device **)(skb->cb);
struct brnf_net *brnet;
p = br_port_get_rcu(state->out);
if (p == NULL)
return NF_ACCEPT;
br = p->br;
if (!brnf_call_arptables && !br_opt_get(br, BROPT_NF_CALL_ARPTABLES))
brnet = net_generic(state->net, brnf_net_id);
if (!brnet->call_arptables && !br_opt_get(br, BROPT_NF_CALL_ARPTABLES))
return NF_ACCEPT;
if (!IS_ARP(skb)) {
if (!IS_VLAN_ARP(skb))
if (!is_vlan_arp(skb, state->net))
return NF_ACCEPT;
nf_bridge_pull_encap_header(skb);
}
if (arp_hdr(skb)->ar_pln != 4) {
if (IS_VLAN_ARP(skb))
if (is_vlan_arp(skb, state->net))
nf_bridge_push_encap_header(skb);
return NF_ACCEPT;
}
@ -791,9 +819,11 @@ static unsigned int br_nf_post_routing(void *priv,
if (!realoutdev)
return NF_DROP;
if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
if (IS_IP(skb) || is_vlan_ip(skb, state->net) ||
is_pppoe_ip(skb, state->net))
pf = NFPROTO_IPV4;
else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
else if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
is_pppoe_ipv6(skb, state->net))
pf = NFPROTO_IPV6;
else
return NF_ACCEPT;
@ -946,23 +976,6 @@ static int brnf_device_event(struct notifier_block *unused, unsigned long event,
return NOTIFY_OK;
}
static void __net_exit brnf_exit_net(struct net *net)
{
struct brnf_net *brnet = net_generic(net, brnf_net_id);
if (!brnet->enabled)
return;
nf_unregister_net_hooks(net, br_nf_ops, ARRAY_SIZE(br_nf_ops));
brnet->enabled = false;
}
static struct pernet_operations brnf_net_ops __read_mostly = {
.exit = brnf_exit_net,
.id = &brnf_net_id,
.size = sizeof(struct brnf_net),
};
static struct notifier_block brnf_notifier __read_mostly = {
.notifier_call = brnf_device_event,
};
@ -1021,50 +1034,125 @@ int brnf_sysctl_call_tables(struct ctl_table *ctl, int write,
static struct ctl_table brnf_table[] = {
{
.procname = "bridge-nf-call-arptables",
.data = &brnf_call_arptables,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-call-iptables",
.data = &brnf_call_iptables,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-call-ip6tables",
.data = &brnf_call_ip6tables,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-filter-vlan-tagged",
.data = &brnf_filter_vlan_tagged,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-filter-pppoe-tagged",
.data = &brnf_filter_pppoe_tagged,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-pass-vlan-input-dev",
.data = &brnf_pass_vlan_indev,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{ }
};
static inline void br_netfilter_sysctl_default(struct brnf_net *brnf)
{
brnf->call_iptables = 1;
brnf->call_ip6tables = 1;
brnf->call_arptables = 1;
brnf->filter_vlan_tagged = 0;
brnf->filter_pppoe_tagged = 0;
brnf->pass_vlan_indev = 0;
}
static int br_netfilter_sysctl_init_net(struct net *net)
{
struct ctl_table *table = brnf_table;
struct brnf_net *brnet;
if (!net_eq(net, &init_net)) {
table = kmemdup(table, sizeof(brnf_table), GFP_KERNEL);
if (!table)
return -ENOMEM;
}
brnet = net_generic(net, brnf_net_id);
table[0].data = &brnet->call_arptables;
table[1].data = &brnet->call_iptables;
table[2].data = &brnet->call_ip6tables;
table[3].data = &brnet->filter_vlan_tagged;
table[4].data = &brnet->filter_pppoe_tagged;
table[5].data = &brnet->pass_vlan_indev;
br_netfilter_sysctl_default(brnet);
brnet->ctl_hdr = register_net_sysctl(net, "net/bridge", table);
if (!brnet->ctl_hdr) {
if (!net_eq(net, &init_net))
kfree(table);
return -ENOMEM;
}
return 0;
}
static void br_netfilter_sysctl_exit_net(struct net *net,
struct brnf_net *brnet)
{
struct ctl_table *table = brnet->ctl_hdr->ctl_table_arg;
unregister_net_sysctl_table(brnet->ctl_hdr);
if (!net_eq(net, &init_net))
kfree(table);
}
static int __net_init brnf_init_net(struct net *net)
{
return br_netfilter_sysctl_init_net(net);
}
#endif
static void __net_exit brnf_exit_net(struct net *net)
{
struct brnf_net *brnet;
brnet = net_generic(net, brnf_net_id);
if (brnet->enabled) {
nf_unregister_net_hooks(net, br_nf_ops, ARRAY_SIZE(br_nf_ops));
brnet->enabled = false;
}
#ifdef CONFIG_SYSCTL
br_netfilter_sysctl_exit_net(net, brnet);
#endif
}
static struct pernet_operations brnf_net_ops __read_mostly = {
#ifdef CONFIG_SYSCTL
.init = brnf_init_net,
#endif
.exit = brnf_exit_net,
.id = &brnf_net_id,
.size = sizeof(struct brnf_net),
};
static int __init br_netfilter_init(void)
{
int ret;
@ -1079,16 +1167,6 @@ static int __init br_netfilter_init(void)
return ret;
}
#ifdef CONFIG_SYSCTL
brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table);
if (brnf_sysctl_header == NULL) {
printk(KERN_WARNING
"br_netfilter: can't register to sysctl.\n");
unregister_netdevice_notifier(&brnf_notifier);
unregister_pernet_subsys(&brnf_net_ops);
return -ENOMEM;
}
#endif
RCU_INIT_POINTER(nf_br_ops, &br_ops);
printk(KERN_NOTICE "Bridge firewalling registered\n");
return 0;
@ -1099,9 +1177,6 @@ static void __exit br_netfilter_fini(void)
RCU_INIT_POINTER(nf_br_ops, NULL);
unregister_netdevice_notifier(&brnf_notifier);
unregister_pernet_subsys(&brnf_net_ops);
#ifdef CONFIG_SYSCTL
unregister_net_sysctl_table(brnf_sysctl_header);
#endif
}
module_init(br_netfilter_init);


@ -224,7 +224,7 @@ unsigned int br_nf_pre_routing_ipv6(void *priv,
nf_bridge = nf_bridge_alloc(skb);
if (!nf_bridge)
return NF_DROP;
if (!setup_pre_routing(skb))
if (!setup_pre_routing(skb, state->net))
return NF_DROP;
nf_bridge = nf_bridge_info_get(skb);


@ -331,6 +331,8 @@ static int nf_ct_bridge_frag_restore(struct sk_buff *skb,
}
if (data->vlan_present)
__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci);
else if (skb_vlan_tag_present(skb))
__vlan_hwaccel_clear_tag(skb);
skb_copy_to_linear_data_offset(skb, -ETH_HLEN, data->mac, ETH_HLEN);
skb_reset_mac_header(skb);


@ -473,6 +473,7 @@ error:
*info = htonl((pp_ptr-iph)<<24);
return -EINVAL;
}
EXPORT_SYMBOL(__ip_options_compile);
int ip_options_compile(struct net *net,
struct ip_options *opt, struct sk_buff *skb)
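Exporting __ip_options_compile() lets the new nft exthdr IPv4-option matching parse a packet's option area outside ip_input.c. A condensed caller sketch (names assumed, error handling abbreviated):

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/ip.h>

/* Parse the IPv4 option block of @skb into a scratch ip_options.  Work on a
 * copy, since __ip_options_compile() may rewrite the option data.
 */
static int example_parse_ipv4_options(struct net *net, struct sk_buff *skb)
{
	unsigned char optbuf[sizeof(struct ip_options) + 40];
	struct ip_options *opt = (struct ip_options *)optbuf;
	int optlen = ip_hdrlen(skb) - sizeof(struct iphdr);
	__be32 info;

	if (optlen <= 0)
		return -ENOENT;		/* no options present */

	memset(opt, 0, sizeof(struct ip_options));
	if (skb_copy_bits(skb, sizeof(struct iphdr), opt->__data, optlen))
		return -EBADMSG;
	opt->optlen = optlen;

	if (__ip_options_compile(net, opt, NULL, &info))
		return -EBADMSG;	/* malformed option block */

	return opt->rr ? 0 : -ENOENT;	/* e.g. test for Record Route */
}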


@ -3,258 +3,11 @@
* Copyright (c) 2013 Patrick McHardy <kaber@trash.net>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/tcp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_SYNPROXY.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_conntrack_ecache.h>
static struct iphdr *
synproxy_build_ip(struct net *net, struct sk_buff *skb, __be32 saddr,
__be32 daddr)
{
struct iphdr *iph;
skb_reset_network_header(skb);
iph = skb_put(skb, sizeof(*iph));
iph->version = 4;
iph->ihl = sizeof(*iph) / 4;
iph->tos = 0;
iph->id = 0;
iph->frag_off = htons(IP_DF);
iph->ttl = net->ipv4.sysctl_ip_default_ttl;
iph->protocol = IPPROTO_TCP;
iph->check = 0;
iph->saddr = saddr;
iph->daddr = daddr;
return iph;
}
static void
synproxy_send_tcp(struct net *net,
const struct sk_buff *skb, struct sk_buff *nskb,
struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
struct iphdr *niph, struct tcphdr *nth,
unsigned int tcp_hdr_size)
{
nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0);
nskb->ip_summed = CHECKSUM_PARTIAL;
nskb->csum_start = (unsigned char *)nth - nskb->head;
nskb->csum_offset = offsetof(struct tcphdr, check);
skb_dst_set_noref(nskb, skb_dst(skb));
nskb->protocol = htons(ETH_P_IP);
if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
goto free_nskb;
if (nfct) {
nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo);
nf_conntrack_get(nfct);
}
ip_local_out(net, nskb->sk, nskb);
return;
free_nskb:
kfree_skb(nskb);
}
static void
synproxy_send_client_synack(struct net *net,
const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts)
{
struct sk_buff *nskb;
struct iphdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
u16 mss = opts->mss;
iph = ip_hdr(skb);
tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
GFP_ATOMIC);
if (nskb == NULL)
return;
skb_reserve(nskb, MAX_TCP_HEADER);
niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr);
skb_reset_transport_header(nskb);
nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->dest;
nth->dest = th->source;
nth->seq = htonl(__cookie_v4_init_sequence(iph, th, &mss));
nth->ack_seq = htonl(ntohl(th->seq) + 1);
tcp_flag_word(nth) = TCP_FLAG_SYN | TCP_FLAG_ACK;
if (opts->options & XT_SYNPROXY_OPT_ECN)
tcp_flag_word(nth) |= TCP_FLAG_ECE;
nth->doff = tcp_hdr_size / 4;
nth->window = 0;
nth->check = 0;
nth->urg_ptr = 0;
synproxy_build_options(nth, opts);
synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size);
}
static void
synproxy_send_server_syn(struct net *net,
const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts, u32 recv_seq)
{
struct synproxy_net *snet = synproxy_pernet(net);
struct sk_buff *nskb;
struct iphdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
iph = ip_hdr(skb);
tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
GFP_ATOMIC);
if (nskb == NULL)
return;
skb_reserve(nskb, MAX_TCP_HEADER);
niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr);
skb_reset_transport_header(nskb);
nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->source;
nth->dest = th->dest;
nth->seq = htonl(recv_seq - 1);
/* ack_seq is used to relay our ISN to the synproxy hook to initialize
* sequence number translation once a connection tracking entry exists.
*/
nth->ack_seq = htonl(ntohl(th->ack_seq) - 1);
tcp_flag_word(nth) = TCP_FLAG_SYN;
if (opts->options & XT_SYNPROXY_OPT_ECN)
tcp_flag_word(nth) |= TCP_FLAG_ECE | TCP_FLAG_CWR;
nth->doff = tcp_hdr_size / 4;
nth->window = th->window;
nth->check = 0;
nth->urg_ptr = 0;
synproxy_build_options(nth, opts);
synproxy_send_tcp(net, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
niph, nth, tcp_hdr_size);
}
static void
synproxy_send_server_ack(struct net *net,
const struct ip_ct_tcp *state,
const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts)
{
struct sk_buff *nskb;
struct iphdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
iph = ip_hdr(skb);
tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
GFP_ATOMIC);
if (nskb == NULL)
return;
skb_reserve(nskb, MAX_TCP_HEADER);
niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr);
skb_reset_transport_header(nskb);
nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->dest;
nth->dest = th->source;
nth->seq = htonl(ntohl(th->ack_seq));
nth->ack_seq = htonl(ntohl(th->seq) + 1);
tcp_flag_word(nth) = TCP_FLAG_ACK;
nth->doff = tcp_hdr_size / 4;
nth->window = htons(state->seen[IP_CT_DIR_ORIGINAL].td_maxwin);
nth->check = 0;
nth->urg_ptr = 0;
synproxy_build_options(nth, opts);
synproxy_send_tcp(net, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
}
static void
synproxy_send_client_ack(struct net *net,
const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts)
{
struct sk_buff *nskb;
struct iphdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
iph = ip_hdr(skb);
tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
GFP_ATOMIC);
if (nskb == NULL)
return;
skb_reserve(nskb, MAX_TCP_HEADER);
niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr);
skb_reset_transport_header(nskb);
nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->source;
nth->dest = th->dest;
nth->seq = htonl(ntohl(th->seq) + 1);
nth->ack_seq = th->ack_seq;
tcp_flag_word(nth) = TCP_FLAG_ACK;
nth->doff = tcp_hdr_size / 4;
nth->window = htons(ntohs(th->window) >> opts->wscale);
nth->check = 0;
nth->urg_ptr = 0;
synproxy_build_options(nth, opts);
synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size);
}
static bool
synproxy_recv_client_ack(struct net *net,
const struct sk_buff *skb, const struct tcphdr *th,
struct synproxy_options *opts, u32 recv_seq)
{
struct synproxy_net *snet = synproxy_pernet(net);
int mss;
mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1);
if (mss == 0) {
this_cpu_inc(snet->stats->cookie_invalid);
return false;
}
this_cpu_inc(snet->stats->cookie_valid);
opts->mss = mss;
opts->options |= XT_SYNPROXY_OPT_MSS;
if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
synproxy_check_timestamp_cookie(opts);
synproxy_send_server_syn(net, skb, th, opts, recv_seq);
return true;
}
#include <net/netfilter/nf_synproxy.h>
static unsigned int
synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
@ -306,135 +59,6 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
return XT_CONTINUE;
}
static unsigned int ipv4_synproxy_hook(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *nhs)
{
struct net *net = nhs->net;
struct synproxy_net *snet = synproxy_pernet(net);
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
struct nf_conn_synproxy *synproxy;
struct synproxy_options opts = {};
const struct ip_ct_tcp *state;
struct tcphdr *th, _th;
unsigned int thoff;
ct = nf_ct_get(skb, &ctinfo);
if (ct == NULL)
return NF_ACCEPT;
synproxy = nfct_synproxy(ct);
if (synproxy == NULL)
return NF_ACCEPT;
if (nf_is_loopback_packet(skb) ||
ip_hdr(skb)->protocol != IPPROTO_TCP)
return NF_ACCEPT;
thoff = ip_hdrlen(skb);
th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
if (th == NULL)
return NF_DROP;
state = &ct->proto.tcp;
switch (state->state) {
case TCP_CONNTRACK_CLOSE:
if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
ntohl(th->seq) + 1);
break;
}
if (!th->syn || th->ack ||
CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
break;
/* Reopened connection - reset the sequence number and timestamp
* adjustments, they will get initialized once the connection is
* reestablished.
*/
nf_ct_seqadj_init(ct, ctinfo, 0);
synproxy->tsoff = 0;
this_cpu_inc(snet->stats->conn_reopened);
/* fall through */
case TCP_CONNTRACK_SYN_SENT:
if (!synproxy_parse_options(skb, thoff, th, &opts))
return NF_DROP;
if (!th->syn && th->ack &&
CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
/* Keep-Alives are sent with SEG.SEQ = SND.NXT-1,
* therefore we need to add 1 to make the SYN sequence
* number match the one of first SYN.
*/
if (synproxy_recv_client_ack(net, skb, th, &opts,
ntohl(th->seq) + 1)) {
this_cpu_inc(snet->stats->cookie_retrans);
consume_skb(skb);
return NF_STOLEN;
} else {
return NF_DROP;
}
}
synproxy->isn = ntohl(th->ack_seq);
if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
synproxy->its = opts.tsecr;
nf_conntrack_event_cache(IPCT_SYNPROXY, ct);
break;
case TCP_CONNTRACK_SYN_RECV:
if (!th->syn || !th->ack)
break;
if (!synproxy_parse_options(skb, thoff, th, &opts))
return NF_DROP;
if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) {
synproxy->tsoff = opts.tsval - synproxy->its;
nf_conntrack_event_cache(IPCT_SYNPROXY, ct);
}
opts.options &= ~(XT_SYNPROXY_OPT_MSS |
XT_SYNPROXY_OPT_WSCALE |
XT_SYNPROXY_OPT_SACK_PERM);
swap(opts.tsval, opts.tsecr);
synproxy_send_server_ack(net, state, skb, th, &opts);
nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq));
nf_conntrack_event_cache(IPCT_SEQADJ, ct);
swap(opts.tsval, opts.tsecr);
synproxy_send_client_ack(net, skb, th, &opts);
consume_skb(skb);
return NF_STOLEN;
default:
break;
}
synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy);
return NF_ACCEPT;
}
static const struct nf_hook_ops ipv4_synproxy_ops[] = {
{
.hook = ipv4_synproxy_hook,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
},
{
.hook = ipv4_synproxy_hook,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
},
};
static int synproxy_tg4_check(const struct xt_tgchk_param *par)
{
struct synproxy_net *snet = synproxy_pernet(par->net);
@ -449,16 +73,12 @@ static int synproxy_tg4_check(const struct xt_tgchk_param *par)
if (err)
return err;
if (snet->hook_ref4 == 0) {
err = nf_register_net_hooks(par->net, ipv4_synproxy_ops,
ARRAY_SIZE(ipv4_synproxy_ops));
if (err) {
nf_ct_netns_put(par->net, par->family);
return err;
}
err = nf_synproxy_ipv4_init(snet, par->net);
if (err) {
nf_ct_netns_put(par->net, par->family);
return err;
}
snet->hook_ref4++;
return err;
}
@ -466,10 +86,7 @@ static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par)
{
struct synproxy_net *snet = synproxy_pernet(par->net);
snet->hook_ref4--;
if (snet->hook_ref4 == 0)
nf_unregister_net_hooks(par->net, ipv4_synproxy_ops,
ARRAY_SIZE(ipv4_synproxy_ops));
nf_synproxy_ipv4_fini(snet, par->net);
nf_ct_netns_put(par->net, par->family);
}


@ -2,7 +2,7 @@
/*
* 'raw' table, which is the very first hooked in at PRE_ROUTING and LOCAL_OUT .
*
* Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* Copyright (C) 2003 Jozsef Kadlecsik <kadlec@netfilter.org>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>


@ -6,7 +6,7 @@
* Copyright (c) 2006-2012 Patrick McHardy <kaber@trash.net>
*
* Based on the 'brute force' H.323 NAT module by
* Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* Jozsef Kadlecsik <kadlec@netfilter.org>
*/
#include <linux/module.h>


@ -234,12 +234,18 @@ static const struct nf_ipv6_ops ipv6ops = {
.route_me_harder = ip6_route_me_harder,
.dev_get_saddr = ipv6_dev_get_saddr,
.route = __nf_ip6_route,
#if IS_ENABLED(CONFIG_SYN_COOKIES)
.cookie_init_sequence = __cookie_v6_init_sequence,
.cookie_v6_check = __cookie_v6_check,
#endif
#endif
.route_input = ip6_route_input,
.fragment = ip6_fragment,
.reroute = nf_ip6_reroute,
#if IS_MODULE(CONFIG_IPV6)
#if IS_MODULE(CONFIG_IPV6) && IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
.br_defrag = nf_ct_frag6_gather,
#endif
#if IS_MODULE(CONFIG_IPV6)
.br_fragment = br_ip6_fragment,
#endif
};


@ -3,272 +3,11 @@
* Copyright (c) 2013 Patrick McHardy <kaber@trash.net>
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/tcp.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_SYNPROXY.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_conntrack_ecache.h>
static struct ipv6hdr *
synproxy_build_ip(struct net *net, struct sk_buff *skb,
const struct in6_addr *saddr,
const struct in6_addr *daddr)
{
struct ipv6hdr *iph;
skb_reset_network_header(skb);
iph = skb_put(skb, sizeof(*iph));
ip6_flow_hdr(iph, 0, 0);
iph->hop_limit = net->ipv6.devconf_all->hop_limit;
iph->nexthdr = IPPROTO_TCP;
iph->saddr = *saddr;
iph->daddr = *daddr;
return iph;
}
static void
synproxy_send_tcp(struct net *net,
const struct sk_buff *skb, struct sk_buff *nskb,
struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
struct ipv6hdr *niph, struct tcphdr *nth,
unsigned int tcp_hdr_size)
{
struct dst_entry *dst;
struct flowi6 fl6;
nth->check = ~tcp_v6_check(tcp_hdr_size, &niph->saddr, &niph->daddr, 0);
nskb->ip_summed = CHECKSUM_PARTIAL;
nskb->csum_start = (unsigned char *)nth - nskb->head;
nskb->csum_offset = offsetof(struct tcphdr, check);
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_TCP;
fl6.saddr = niph->saddr;
fl6.daddr = niph->daddr;
fl6.fl6_sport = nth->source;
fl6.fl6_dport = nth->dest;
security_skb_classify_flow((struct sk_buff *)skb, flowi6_to_flowi(&fl6));
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
dst_release(dst);
goto free_nskb;
}
dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
if (IS_ERR(dst))
goto free_nskb;
skb_dst_set(nskb, dst);
if (nfct) {
nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo);
nf_conntrack_get(nfct);
}
ip6_local_out(net, nskb->sk, nskb);
return;
free_nskb:
kfree_skb(nskb);
}
static void
synproxy_send_client_synack(struct net *net,
const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts)
{
struct sk_buff *nskb;
struct ipv6hdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
u16 mss = opts->mss;
iph = ipv6_hdr(skb);
tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
GFP_ATOMIC);
if (nskb == NULL)
return;
skb_reserve(nskb, MAX_TCP_HEADER);
niph = synproxy_build_ip(net, nskb, &iph->daddr, &iph->saddr);
skb_reset_transport_header(nskb);
nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->dest;
nth->dest = th->source;
nth->seq = htonl(__cookie_v6_init_sequence(iph, th, &mss));
nth->ack_seq = htonl(ntohl(th->seq) + 1);
tcp_flag_word(nth) = TCP_FLAG_SYN | TCP_FLAG_ACK;
if (opts->options & XT_SYNPROXY_OPT_ECN)
tcp_flag_word(nth) |= TCP_FLAG_ECE;
nth->doff = tcp_hdr_size / 4;
nth->window = 0;
nth->check = 0;
nth->urg_ptr = 0;
synproxy_build_options(nth, opts);
synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size);
}
static void
synproxy_send_server_syn(struct net *net,
const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts, u32 recv_seq)
{
struct synproxy_net *snet = synproxy_pernet(net);
struct sk_buff *nskb;
struct ipv6hdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
iph = ipv6_hdr(skb);
tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
GFP_ATOMIC);
if (nskb == NULL)
return;
skb_reserve(nskb, MAX_TCP_HEADER);
niph = synproxy_build_ip(net, nskb, &iph->saddr, &iph->daddr);
skb_reset_transport_header(nskb);
nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->source;
nth->dest = th->dest;
nth->seq = htonl(recv_seq - 1);
/* ack_seq is used to relay our ISN to the synproxy hook to initialize
* sequence number translation once a connection tracking entry exists.
*/
nth->ack_seq = htonl(ntohl(th->ack_seq) - 1);
tcp_flag_word(nth) = TCP_FLAG_SYN;
if (opts->options & XT_SYNPROXY_OPT_ECN)
tcp_flag_word(nth) |= TCP_FLAG_ECE | TCP_FLAG_CWR;
nth->doff = tcp_hdr_size / 4;
nth->window = th->window;
nth->check = 0;
nth->urg_ptr = 0;
synproxy_build_options(nth, opts);
synproxy_send_tcp(net, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
niph, nth, tcp_hdr_size);
}
static void
synproxy_send_server_ack(struct net *net,
const struct ip_ct_tcp *state,
const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts)
{
struct sk_buff *nskb;
struct ipv6hdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
iph = ipv6_hdr(skb);
tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
GFP_ATOMIC);
if (nskb == NULL)
return;
skb_reserve(nskb, MAX_TCP_HEADER);
niph = synproxy_build_ip(net, nskb, &iph->daddr, &iph->saddr);
skb_reset_transport_header(nskb);
nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->dest;
nth->dest = th->source;
nth->seq = htonl(ntohl(th->ack_seq));
nth->ack_seq = htonl(ntohl(th->seq) + 1);
tcp_flag_word(nth) = TCP_FLAG_ACK;
nth->doff = tcp_hdr_size / 4;
nth->window = htons(state->seen[IP_CT_DIR_ORIGINAL].td_maxwin);
nth->check = 0;
nth->urg_ptr = 0;
synproxy_build_options(nth, opts);
synproxy_send_tcp(net, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
}
static void
synproxy_send_client_ack(struct net *net,
const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts)
{
struct sk_buff *nskb;
struct ipv6hdr *iph, *niph;
struct tcphdr *nth;
unsigned int tcp_hdr_size;
iph = ipv6_hdr(skb);
tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
GFP_ATOMIC);
if (nskb == NULL)
return;
skb_reserve(nskb, MAX_TCP_HEADER);
niph = synproxy_build_ip(net, nskb, &iph->saddr, &iph->daddr);
skb_reset_transport_header(nskb);
nth = skb_put(nskb, tcp_hdr_size);
nth->source = th->source;
nth->dest = th->dest;
nth->seq = htonl(ntohl(th->seq) + 1);
nth->ack_seq = th->ack_seq;
tcp_flag_word(nth) = TCP_FLAG_ACK;
nth->doff = tcp_hdr_size / 4;
nth->window = htons(ntohs(th->window) >> opts->wscale);
nth->check = 0;
nth->urg_ptr = 0;
synproxy_build_options(nth, opts);
synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size);
}
static bool
synproxy_recv_client_ack(struct net *net,
const struct sk_buff *skb, const struct tcphdr *th,
struct synproxy_options *opts, u32 recv_seq)
{
struct synproxy_net *snet = synproxy_pernet(net);
int mss;
mss = __cookie_v6_check(ipv6_hdr(skb), th, ntohl(th->ack_seq) - 1);
if (mss == 0) {
this_cpu_inc(snet->stats->cookie_invalid);
return false;
}
this_cpu_inc(snet->stats->cookie_valid);
opts->mss = mss;
opts->options |= XT_SYNPROXY_OPT_MSS;
if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
synproxy_check_timestamp_cookie(opts);
synproxy_send_server_syn(net, skb, th, opts, recv_seq);
return true;
}
#include <net/netfilter/nf_synproxy.h>
static unsigned int
synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
@ -304,13 +43,14 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
XT_SYNPROXY_OPT_SACK_PERM |
XT_SYNPROXY_OPT_ECN);
synproxy_send_client_synack(net, skb, th, &opts);
synproxy_send_client_synack_ipv6(net, skb, th, &opts);
consume_skb(skb);
return NF_STOLEN;
} else if (th->ack && !(th->fin || th->rst || th->syn)) {
/* ACK from client */
if (synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq))) {
if (synproxy_recv_client_ack_ipv6(net, skb, th, &opts,
ntohl(th->seq))) {
consume_skb(skb);
return NF_STOLEN;
} else {
@ -321,141 +61,6 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
return XT_CONTINUE;
}
static unsigned int ipv6_synproxy_hook(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *nhs)
{
struct net *net = nhs->net;
struct synproxy_net *snet = synproxy_pernet(net);
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
struct nf_conn_synproxy *synproxy;
struct synproxy_options opts = {};
const struct ip_ct_tcp *state;
struct tcphdr *th, _th;
__be16 frag_off;
u8 nexthdr;
int thoff;
ct = nf_ct_get(skb, &ctinfo);
if (ct == NULL)
return NF_ACCEPT;
synproxy = nfct_synproxy(ct);
if (synproxy == NULL)
return NF_ACCEPT;
if (nf_is_loopback_packet(skb))
return NF_ACCEPT;
nexthdr = ipv6_hdr(skb)->nexthdr;
thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
&frag_off);
if (thoff < 0 || nexthdr != IPPROTO_TCP)
return NF_ACCEPT;
th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
if (th == NULL)
return NF_DROP;
state = &ct->proto.tcp;
switch (state->state) {
case TCP_CONNTRACK_CLOSE:
if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
ntohl(th->seq) + 1);
break;
}
if (!th->syn || th->ack ||
CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
break;
/* Reopened connection - reset the sequence number and timestamp
* adjustments, they will get initialized once the connection is
* reestablished.
*/
nf_ct_seqadj_init(ct, ctinfo, 0);
synproxy->tsoff = 0;
this_cpu_inc(snet->stats->conn_reopened);
/* fall through */
case TCP_CONNTRACK_SYN_SENT:
if (!synproxy_parse_options(skb, thoff, th, &opts))
return NF_DROP;
if (!th->syn && th->ack &&
CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
/* Keep-Alives are sent with SEG.SEQ = SND.NXT-1,
* therefore we need to add 1 to make the SYN sequence
* number match the one of first SYN.
*/
if (synproxy_recv_client_ack(net, skb, th, &opts,
ntohl(th->seq) + 1)) {
this_cpu_inc(snet->stats->cookie_retrans);
consume_skb(skb);
return NF_STOLEN;
} else {
return NF_DROP;
}
}
synproxy->isn = ntohl(th->ack_seq);
if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
synproxy->its = opts.tsecr;
nf_conntrack_event_cache(IPCT_SYNPROXY, ct);
break;
case TCP_CONNTRACK_SYN_RECV:
if (!th->syn || !th->ack)
break;
if (!synproxy_parse_options(skb, thoff, th, &opts))
return NF_DROP;
if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) {
synproxy->tsoff = opts.tsval - synproxy->its;
nf_conntrack_event_cache(IPCT_SYNPROXY, ct);
}
opts.options &= ~(XT_SYNPROXY_OPT_MSS |
XT_SYNPROXY_OPT_WSCALE |
XT_SYNPROXY_OPT_SACK_PERM);
swap(opts.tsval, opts.tsecr);
synproxy_send_server_ack(net, state, skb, th, &opts);
nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq));
nf_conntrack_event_cache(IPCT_SEQADJ, ct);
swap(opts.tsval, opts.tsecr);
synproxy_send_client_ack(net, skb, th, &opts);
consume_skb(skb);
return NF_STOLEN;
default:
break;
}
synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy);
return NF_ACCEPT;
}
static const struct nf_hook_ops ipv6_synproxy_ops[] = {
{
.hook = ipv6_synproxy_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
},
{
.hook = ipv6_synproxy_hook,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
},
};
static int synproxy_tg6_check(const struct xt_tgchk_param *par)
{
struct synproxy_net *snet = synproxy_pernet(par->net);
@ -471,16 +76,12 @@ static int synproxy_tg6_check(const struct xt_tgchk_param *par)
if (err)
return err;
if (snet->hook_ref6 == 0) {
err = nf_register_net_hooks(par->net, ipv6_synproxy_ops,
ARRAY_SIZE(ipv6_synproxy_ops));
if (err) {
nf_ct_netns_put(par->net, par->family);
return err;
}
err = nf_synproxy_ipv6_init(snet, par->net);
if (err) {
nf_ct_netns_put(par->net, par->family);
return err;
}
snet->hook_ref6++;
return err;
}
@ -488,10 +89,7 @@ static void synproxy_tg6_destroy(const struct xt_tgdtor_param *par)
{
struct synproxy_net *snet = synproxy_pernet(par->net);
snet->hook_ref6--;
if (snet->hook_ref6 == 0)
nf_unregister_net_hooks(par->net, ipv6_synproxy_ops,
ARRAY_SIZE(ipv6_synproxy_ops));
nf_synproxy_ipv6_fini(snet, par->net);
nf_ct_netns_put(par->net, par->family);
}


@ -2,7 +2,7 @@
/*
* IPv6 raw table, a port of the IPv4 raw table to IPv6
*
* Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* Copyright (C) 2003 Jozsef Kadlecsik <kadlec@netfilter.org>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>


@ -1,6 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Copyright (C) 2013 Jozsef Kadlecsik <kadlec@netfilter.org> */
#ifndef __IP_SET_BITMAP_IP_GEN_H
#define __IP_SET_BITMAP_IP_GEN_H


@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
* Patrick Schaaf <bof@bof.de>
* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
*/
/* Kernel module implementing an IP set type: the bitmap:ip type */
@ -28,7 +28,7 @@
#define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
IP_SET_MODULE_DESC("bitmap:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_bitmap:ip");


@ -2,7 +2,6 @@
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
* Patrick Schaaf <bof@bof.de>
* Martin Josefsson <gandalf@wlug.westbo.se>
* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Kernel module implementing an IP set type: the bitmap:ip,mac type */
@ -28,7 +27,7 @@
#define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
IP_SET_MODULE_DESC("bitmap:ip,mac", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_bitmap:ip,mac");


@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */
/* Kernel module implementing an IP set type: the bitmap:port type */
@ -23,7 +22,7 @@
#define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
IP_SET_MODULE_DESC("bitmap:port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_bitmap:port");


@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
* Patrick Schaaf <bof@bof.de>
* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
*/
/* Kernel module for IP set management */
@ -48,7 +48,7 @@ static unsigned int max_sets;
module_param(max_sets, int, 0600);
MODULE_PARM_DESC(max_sets, "maximal number of sets");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
MODULE_DESCRIPTION("core IP set support");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
@ -1290,11 +1290,13 @@ dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
struct nlattr *attr = (void *)nlh + min_len;
u32 dump_type;
ip_set_id_t index;
int ret;
/* Second pass, so parser can't fail */
nla_parse_deprecated(cda, IPSET_ATTR_CMD_MAX, attr,
nlh->nlmsg_len - min_len, ip_set_setname_policy,
NULL);
ret = nla_parse_deprecated(cda, IPSET_ATTR_CMD_MAX, attr,
nlh->nlmsg_len - min_len,
ip_set_setname_policy, NULL);
if (ret)
return ret;
cb->args[IPSET_CB_PROTO] = nla_get_u8(cda[IPSET_ATTR_PROTOCOL]);
if (cda[IPSET_ATTR_SETNAME]) {
@ -1541,10 +1543,14 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
memcpy(&errmsg->msg, nlh, nlh->nlmsg_len);
cmdattr = (void *)&errmsg->msg + min_len;
nla_parse_deprecated(cda, IPSET_ATTR_CMD_MAX, cmdattr,
nlh->nlmsg_len - min_len,
ip_set_adt_policy, NULL);
ret = nla_parse_deprecated(cda, IPSET_ATTR_CMD_MAX, cmdattr,
nlh->nlmsg_len - min_len,
ip_set_adt_policy, NULL);
if (ret) {
nlmsg_free(skb2);
return ret;
}
errline = nla_data(cda[IPSET_ATTR_LINENO]);
*errline = lineno;
@ -1558,10 +1564,12 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
return ret;
}
static int ip_set_uadd(struct net *net, struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const attr[],
struct netlink_ext_ack *extack)
static int ip_set_ad(struct net *net, struct sock *ctnl,
struct sk_buff *skb,
enum ipset_adt adt,
const struct nlmsghdr *nlh,
const struct nlattr * const attr[],
struct netlink_ext_ack *extack)
{
struct ip_set_net *inst = ip_set_pernet(net);
struct ip_set *set;
@ -1590,18 +1598,17 @@ static int ip_set_uadd(struct net *net, struct sock *ctnl, struct sk_buff *skb,
if (attr[IPSET_ATTR_DATA]) {
if (nla_parse_nested_deprecated(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], set->type->adt_policy, NULL))
return -IPSET_ERR_PROTOCOL;
ret = call_ad(ctnl, skb, set, tb, IPSET_ADD, flags,
ret = call_ad(ctnl, skb, set, tb, adt, flags,
use_lineno);
} else {
int nla_rem;
nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
memset(tb, 0, sizeof(tb));
if (nla_type(nla) != IPSET_ATTR_DATA ||
!flag_nested(nla) ||
nla_parse_nested_deprecated(tb, IPSET_ATTR_ADT_MAX, nla, set->type->adt_policy, NULL))
return -IPSET_ERR_PROTOCOL;
ret = call_ad(ctnl, skb, set, tb, IPSET_ADD,
ret = call_ad(ctnl, skb, set, tb, adt,
flags, use_lineno);
if (ret < 0)
return ret;
@ -1610,56 +1617,22 @@ static int ip_set_uadd(struct net *net, struct sock *ctnl, struct sk_buff *skb,
return ret;
}
static int ip_set_udel(struct net *net, struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
static int ip_set_uadd(struct net *net, struct sock *ctnl,
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const attr[],
struct netlink_ext_ack *extack)
{
struct ip_set_net *inst = ip_set_pernet(net);
struct ip_set *set;
struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
const struct nlattr *nla;
u32 flags = flag_exist(nlh);
bool use_lineno;
int ret = 0;
return ip_set_ad(net, ctnl, skb,
IPSET_ADD, nlh, attr, extack);
}
if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME] ||
!((attr[IPSET_ATTR_DATA] != NULL) ^
(attr[IPSET_ATTR_ADT] != NULL)) ||
(attr[IPSET_ATTR_DATA] &&
!flag_nested(attr[IPSET_ATTR_DATA])) ||
(attr[IPSET_ATTR_ADT] &&
(!flag_nested(attr[IPSET_ATTR_ADT]) ||
!attr[IPSET_ATTR_LINENO]))))
return -IPSET_ERR_PROTOCOL;
set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
if (!set)
return -ENOENT;
use_lineno = !!attr[IPSET_ATTR_LINENO];
if (attr[IPSET_ATTR_DATA]) {
if (nla_parse_nested_deprecated(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], set->type->adt_policy, NULL))
return -IPSET_ERR_PROTOCOL;
ret = call_ad(ctnl, skb, set, tb, IPSET_DEL, flags,
use_lineno);
} else {
int nla_rem;
nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
memset(tb, 0, sizeof(*tb));
if (nla_type(nla) != IPSET_ATTR_DATA ||
!flag_nested(nla) ||
nla_parse_nested_deprecated(tb, IPSET_ATTR_ADT_MAX, nla, set->type->adt_policy, NULL))
return -IPSET_ERR_PROTOCOL;
ret = call_ad(ctnl, skb, set, tb, IPSET_DEL,
flags, use_lineno);
if (ret < 0)
return ret;
}
}
return ret;
static int ip_set_udel(struct net *net, struct sock *ctnl,
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const attr[],
struct netlink_ext_ack *extack)
{
return ip_set_ad(net, ctnl, skb,
IPSET_DEL, nlh, attr, extack);
}
static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,

View File

@ -1,5 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/* Get Layer-4 data from the packets */

View File

@ -1,6 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Copyright (C) 2013 Jozsef Kadlecsik <kadlec@netfilter.org> */
#ifndef _IP_SET_HASH_GEN_H
#define _IP_SET_HASH_GEN_H
@ -622,7 +621,7 @@ retry:
goto cleanup;
}
m->size = AHASH_INIT_SIZE;
extsize = ext_size(AHASH_INIT_SIZE, dsize);
extsize += ext_size(AHASH_INIT_SIZE, dsize);
RCU_INIT_POINTER(hbucket(t, key), m);
} else if (m->pos >= m->size) {
struct hbucket *ht;

View File

@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */
/* Kernel module implementing an IP set type: the hash:ip type */
@ -27,7 +26,7 @@
#define IPSET_TYPE_REV_MAX 4 /* skbinfo support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
IP_SET_MODULE_DESC("hash:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:ip");

View File

@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* Copyright (C) 2013 Smoothwall Ltd. <vytas.dauksa@smoothwall.net>
*/
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */
/* Kernel module implementing an IP set type: the hash:ip,mark type */

View File

@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */
/* Kernel module implementing an IP set type: the hash:ip,port type */
@ -29,7 +28,7 @@
#define IPSET_TYPE_REV_MAX 5 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
IP_SET_MODULE_DESC("hash:ip,port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:ip,port");

View File

@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */
/* Kernel module implementing an IP set type: the hash:ip,port,ip type */
@ -29,7 +28,7 @@
#define IPSET_TYPE_REV_MAX 5 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
IP_SET_MODULE_DESC("hash:ip,port,ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:ip,port,ip");

View File

@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */
/* Kernel module implementing an IP set type: the hash:ip,port,net type */
@ -31,7 +30,7 @@
#define IPSET_TYPE_REV_MAX 7 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
IP_SET_MODULE_DESC("hash:ip,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:ip,port,net");

View File

@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2014 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Copyright (C) 2014 Jozsef Kadlecsik <kadlec@netfilter.org> */
/* Kernel module implementing an IP set type: the hash:mac type */
@ -20,7 +19,7 @@
#define IPSET_TYPE_REV_MAX 0
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
IP_SET_MODULE_DESC("hash:mac", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:mac");

View File

@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */
/* Kernel module implementing an IP set type: the hash:net type */
@ -28,7 +27,7 @@
#define IPSET_TYPE_REV_MAX 6 /* skbinfo mapping support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
IP_SET_MODULE_DESC("hash:net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:net");

View File

@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2011-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Copyright (C) 2011-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */
/* Kernel module implementing an IP set type: the hash:net,iface type */
@ -29,7 +28,7 @@
#define IPSET_TYPE_REV_MAX 6 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
IP_SET_MODULE_DESC("hash:net,iface", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:net,iface");

View File

@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
* Copyright (C) 2013 Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>
*/

View File

@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */
/* Kernel module implementing an IP set type: the hash:net,port type */
@ -30,7 +29,7 @@
#define IPSET_TYPE_REV_MAX 7 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
IP_SET_MODULE_DESC("hash:net,port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:net,port");

View File

@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */
/* Kernel module implementing an IP set type: the hash:ip,port,net type */

View File

@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2008-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
/* Copyright (C) 2008-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */
/* Kernel module implementing an IP set type: the list:set type */
@ -19,7 +18,7 @@
#define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
IP_SET_MODULE_DESC("list:set", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_list:set");

View File

@ -749,9 +749,6 @@ begin:
continue;
}
if (nf_ct_is_dying(ct))
continue;
if (nf_ct_key_equal(h, tuple, zone, net))
return h;
}
@ -777,20 +774,24 @@ __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
struct nf_conn *ct;
rcu_read_lock();
begin:
h = ____nf_conntrack_find(net, zone, tuple, hash);
if (h) {
/* We have a candidate that matches the tuple we're interested
* in, try to obtain a reference and re-check tuple
*/
ct = nf_ct_tuplehash_to_ctrack(h);
if (unlikely(nf_ct_is_dying(ct) ||
!atomic_inc_not_zero(&ct->ct_general.use)))
h = NULL;
else {
if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
nf_ct_put(ct);
goto begin;
}
if (likely(atomic_inc_not_zero(&ct->ct_general.use))) {
if (likely(nf_ct_key_equal(h, tuple, zone, net)))
goto found;
/* TYPESAFE_BY_RCU recycled the candidate */
nf_ct_put(ct);
}
h = NULL;
}
found:
rcu_read_unlock();
return h;
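
The rewritten lookup above takes its reference before re-checking the tuple: conntrack objects live in a SLAB_TYPESAFE_BY_RCU cache, so a candidate found under rcu_read_lock() can be freed and recycled for a different tuple at any moment, and only a key comparison made while holding a reference is trustworthy. A minimal userspace sketch of that acquire-then-revalidate pattern follows (illustrative only; the struct and helper names are made up, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
    atomic_int use;          /* 0 means the object is free or being recycled */
    unsigned int key;
};

/* Take a reference only while the object is still live (inc-not-zero). */
static bool try_get(struct entry *e)
{
    int old = atomic_load(&e->use);

    while (old != 0)
        if (atomic_compare_exchange_weak(&e->use, &old, old + 1))
            return true;
    return false;
}

/* Acquire-then-revalidate: grab the reference first, check the key second.
 * If the key no longer matches, the object was recycled for another entry;
 * drop the reference and report a miss so the caller can restart the lookup.
 */
static struct entry *confirm(struct entry *candidate, unsigned int key)
{
    if (candidate && try_get(candidate)) {
        if (candidate->key == key)
            return candidate;    /* stable: we hold a reference */
        atomic_fetch_sub(&candidate->use, 1);
    }
    return NULL;
}

int main(void)
{
    struct entry e = { .use = 1, .key = 42 };

    printf("hit:  %p\n", (void *)confirm(&e, 42));  /* returns &e, reference held */
    printf("miss: %p\n", (void *)confirm(&e, 7));   /* key mismatch, reference dropped */
    return 0;
}

In the sketch a failed revalidation simply drops the reference and reports a miss, leaving the caller free to restart the lookup.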

View File

@ -6,7 +6,7 @@
* Copyright (c) 2006-2012 Patrick McHardy <kaber@trash.net>
*
* Based on the 'brute force' H.323 connection tracking module by
* Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* Jozsef Kadlecsik <kadlec@netfilter.org>
*
* For more information, please see http://nath323.sourceforge.net/
*/

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
* (C) 2002-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* (C) 2002-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
* (C) 2006-2012 Patrick McHardy <kaber@trash.net>
*/

File diff suppressed because it is too large

View File

@ -3870,6 +3870,7 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
[NFTA_SET_ELEM_DATA] = { .type = NLA_NESTED },
[NFTA_SET_ELEM_FLAGS] = { .type = NLA_U32 },
[NFTA_SET_ELEM_TIMEOUT] = { .type = NLA_U64 },
[NFTA_SET_ELEM_EXPIRATION] = { .type = NLA_U64 },
[NFTA_SET_ELEM_USERDATA] = { .type = NLA_BINARY,
.len = NFT_USERDATA_MAXLEN },
[NFTA_SET_ELEM_EXPR] = { .type = NLA_NESTED },
@ -4323,7 +4324,7 @@ static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx,
void *nft_set_elem_init(const struct nft_set *set,
const struct nft_set_ext_tmpl *tmpl,
const u32 *key, const u32 *data,
u64 timeout, gfp_t gfp)
u64 timeout, u64 expiration, gfp_t gfp)
{
struct nft_set_ext *ext;
void *elem;
@ -4338,9 +4339,11 @@ void *nft_set_elem_init(const struct nft_set *set,
memcpy(nft_set_ext_key(ext), key, set->klen);
if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
memcpy(nft_set_ext_data(ext), data, set->dlen);
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION))
*nft_set_ext_expiration(ext) =
get_jiffies_64() + timeout;
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
*nft_set_ext_expiration(ext) = get_jiffies_64() + expiration;
if (expiration == 0)
*nft_set_ext_expiration(ext) += timeout;
}
if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT))
*nft_set_ext_timeout(ext) = timeout;
@ -4405,6 +4408,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
struct nft_trans *trans;
u32 flags = 0;
u64 timeout;
u64 expiration;
u8 ulen;
int err;
@ -4448,6 +4452,16 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
timeout = set->timeout;
}
expiration = 0;
if (nla[NFTA_SET_ELEM_EXPIRATION] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
return -EINVAL;
err = nf_msecs_to_jiffies64(nla[NFTA_SET_ELEM_EXPIRATION],
&expiration);
if (err)
return err;
}
err = nft_data_init(ctx, &elem.key.val, sizeof(elem.key), &d1,
nla[NFTA_SET_ELEM_KEY]);
if (err < 0)
@ -4530,7 +4544,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
err = -ENOMEM;
elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data, data.data,
timeout, GFP_KERNEL);
timeout, expiration, GFP_KERNEL);
if (elem.priv == NULL)
goto err3;
@ -4732,7 +4746,7 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
err = -ENOMEM;
elem.priv = nft_set_elem_init(set, &tmpl, elem.key.val.data, NULL, 0,
GFP_KERNEL);
0, GFP_KERNEL);
if (elem.priv == NULL)
goto err2;
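
On the expiration handling added above: NFTA_SET_ELEM_EXPIRATION carries the remaining lifetime of an element (converted from milliseconds with nf_msecs_to_jiffies64()), so a restored element expires after that remainder, while a zero or absent value falls back to the element's full timeout. A tiny standalone sketch of the arithmetic (plain C, hypothetical helper name, time units left abstract):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the expiry computation in nft_set_elem_init(): a non-zero
 * "expiration" restores the remaining lifetime of a dumped element,
 * otherwise the element gets its full timeout. */
static uint64_t elem_expiry(uint64_t now, uint64_t timeout, uint64_t expiration)
{
    return now + (expiration ? expiration : timeout);
}

int main(void)
{
    printf("fresh element:    expires at %llu\n",
           (unsigned long long)elem_expiry(1000, 60000, 0));
    printf("restored element: expires at %llu\n",
           (unsigned long long)elem_expiry(1000, 60000, 12000));
    return 0;
}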

View File

@ -21,6 +21,7 @@
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
struct nft_ct {
enum nft_ct_keys key:8;
@ -1153,6 +1154,135 @@ static struct nft_object_type nft_ct_helper_obj_type __read_mostly = {
.owner = THIS_MODULE,
};
struct nft_ct_expect_obj {
u16 l3num;
__be16 dport;
u8 l4proto;
u8 size;
u32 timeout;
};
static int nft_ct_expect_obj_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
struct nft_ct_expect_obj *priv = nft_obj_data(obj);
if (!tb[NFTA_CT_EXPECT_L4PROTO] ||
!tb[NFTA_CT_EXPECT_DPORT] ||
!tb[NFTA_CT_EXPECT_TIMEOUT] ||
!tb[NFTA_CT_EXPECT_SIZE])
return -EINVAL;
priv->l3num = ctx->family;
if (tb[NFTA_CT_EXPECT_L3PROTO])
priv->l3num = ntohs(nla_get_be16(tb[NFTA_CT_EXPECT_L3PROTO]));
priv->l4proto = nla_get_u8(tb[NFTA_CT_EXPECT_L4PROTO]);
priv->dport = nla_get_be16(tb[NFTA_CT_EXPECT_DPORT]);
priv->timeout = nla_get_u32(tb[NFTA_CT_EXPECT_TIMEOUT]);
priv->size = nla_get_u8(tb[NFTA_CT_EXPECT_SIZE]);
return nf_ct_netns_get(ctx->net, ctx->family);
}
static void nft_ct_expect_obj_destroy(const struct nft_ctx *ctx,
struct nft_object *obj)
{
nf_ct_netns_put(ctx->net, ctx->family);
}
static int nft_ct_expect_obj_dump(struct sk_buff *skb,
struct nft_object *obj, bool reset)
{
const struct nft_ct_expect_obj *priv = nft_obj_data(obj);
if (nla_put_be16(skb, NFTA_CT_EXPECT_L3PROTO, htons(priv->l3num)) ||
nla_put_u8(skb, NFTA_CT_EXPECT_L4PROTO, priv->l4proto) ||
nla_put_be16(skb, NFTA_CT_EXPECT_DPORT, priv->dport) ||
nla_put_u32(skb, NFTA_CT_EXPECT_TIMEOUT, priv->timeout) ||
nla_put_u8(skb, NFTA_CT_EXPECT_SIZE, priv->size))
return -1;
return 0;
}
static void nft_ct_expect_obj_eval(struct nft_object *obj,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_ct_expect_obj *priv = nft_obj_data(obj);
struct nf_conntrack_expect *exp;
enum ip_conntrack_info ctinfo;
struct nf_conn_help *help;
enum ip_conntrack_dir dir;
u16 l3num = priv->l3num;
struct nf_conn *ct;
ct = nf_ct_get(pkt->skb, &ctinfo);
if (!ct || ctinfo == IP_CT_UNTRACKED) {
regs->verdict.code = NFT_BREAK;
return;
}
dir = CTINFO2DIR(ctinfo);
help = nfct_help(ct);
if (!help)
help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
if (!help) {
regs->verdict.code = NF_DROP;
return;
}
if (help->expecting[NF_CT_EXPECT_CLASS_DEFAULT] >= priv->size) {
regs->verdict.code = NFT_BREAK;
return;
}
if (l3num == NFPROTO_INET)
l3num = nf_ct_l3num(ct);
exp = nf_ct_expect_alloc(ct);
if (exp == NULL) {
regs->verdict.code = NF_DROP;
return;
}
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, l3num,
&ct->tuplehash[!dir].tuple.src.u3,
&ct->tuplehash[!dir].tuple.dst.u3,
priv->l4proto, NULL, &priv->dport);
exp->timeout.expires = jiffies + priv->timeout * HZ;
if (nf_ct_expect_related(exp) != 0)
regs->verdict.code = NF_DROP;
}
static const struct nla_policy nft_ct_expect_policy[NFTA_CT_EXPECT_MAX + 1] = {
[NFTA_CT_EXPECT_L3PROTO] = { .type = NLA_U16 },
[NFTA_CT_EXPECT_L4PROTO] = { .type = NLA_U8 },
[NFTA_CT_EXPECT_DPORT] = { .type = NLA_U16 },
[NFTA_CT_EXPECT_TIMEOUT] = { .type = NLA_U32 },
[NFTA_CT_EXPECT_SIZE] = { .type = NLA_U8 },
};
static struct nft_object_type nft_ct_expect_obj_type;
static const struct nft_object_ops nft_ct_expect_obj_ops = {
.type = &nft_ct_expect_obj_type,
.size = sizeof(struct nft_ct_expect_obj),
.eval = nft_ct_expect_obj_eval,
.init = nft_ct_expect_obj_init,
.destroy = nft_ct_expect_obj_destroy,
.dump = nft_ct_expect_obj_dump,
};
static struct nft_object_type nft_ct_expect_obj_type __read_mostly = {
.type = NFT_OBJECT_CT_EXPECT,
.ops = &nft_ct_expect_obj_ops,
.maxattr = NFTA_CT_EXPECT_MAX,
.policy = nft_ct_expect_policy,
.owner = THIS_MODULE,
};
static int __init nft_ct_module_init(void)
{
int err;
@ -1170,17 +1300,23 @@ static int __init nft_ct_module_init(void)
err = nft_register_obj(&nft_ct_helper_obj_type);
if (err < 0)
goto err2;
err = nft_register_obj(&nft_ct_expect_obj_type);
if (err < 0)
goto err3;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
err = nft_register_obj(&nft_ct_timeout_obj_type);
if (err < 0)
goto err3;
goto err4;
#endif
return 0;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
err4:
nft_unregister_obj(&nft_ct_expect_obj_type);
#endif
err3:
nft_unregister_obj(&nft_ct_helper_obj_type);
#endif
err2:
nft_unregister_expr(&nft_notrack_type);
err1:
@ -1193,6 +1329,7 @@ static void __exit nft_ct_module_exit(void)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
nft_unregister_obj(&nft_ct_timeout_obj_type);
#endif
nft_unregister_obj(&nft_ct_expect_obj_type);
nft_unregister_obj(&nft_ct_helper_obj_type);
nft_unregister_expr(&nft_notrack_type);
nft_unregister_expr(&nft_ct_type);
@ -1207,3 +1344,4 @@ MODULE_ALIAS_NFT_EXPR("ct");
MODULE_ALIAS_NFT_EXPR("notrack");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_HELPER);
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_TIMEOUT);
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_EXPECT);

View File

@ -56,7 +56,7 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
elem = nft_set_elem_init(set, &priv->tmpl,
&regs->data[priv->sreg_key],
&regs->data[priv->sreg_data],
timeout, GFP_ATOMIC);
timeout, 0, GFP_ATOMIC);
if (elem == NULL)
goto err1;

View File

@ -59,6 +59,103 @@ err:
regs->verdict.code = NFT_BREAK;
}
/* find the offset to specified option.
*
* If target header is found, its offset is set in *offset and return option
* number. Otherwise, return negative error.
*
* If the first fragment doesn't contain the End of Options it is considered
* invalid.
*/
static int ipv4_find_option(struct net *net, struct sk_buff *skb,
unsigned int *offset, int target)
{
unsigned char optbuf[sizeof(struct ip_options) + 40];
struct ip_options *opt = (struct ip_options *)optbuf;
struct iphdr *iph, _iph;
unsigned int start;
bool found = false;
__be32 info;
int optlen;
iph = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
if (!iph)
return -EBADMSG;
start = sizeof(struct iphdr);
optlen = iph->ihl * 4 - (int)sizeof(struct iphdr);
if (optlen <= 0)
return -ENOENT;
memset(opt, 0, sizeof(struct ip_options));
/* Copy the options since __ip_options_compile() modifies
* the options.
*/
if (skb_copy_bits(skb, start, opt->__data, optlen))
return -EBADMSG;
opt->optlen = optlen;
if (__ip_options_compile(net, opt, NULL, &info))
return -EBADMSG;
switch (target) {
case IPOPT_SSRR:
case IPOPT_LSRR:
if (!opt->srr)
break;
found = target == IPOPT_SSRR ? opt->is_strictroute :
!opt->is_strictroute;
if (found)
*offset = opt->srr + start;
break;
case IPOPT_RR:
if (!opt->rr)
break;
*offset = opt->rr + start;
found = true;
break;
case IPOPT_RA:
if (!opt->router_alert)
break;
*offset = opt->router_alert + start;
found = true;
break;
default:
return -EOPNOTSUPP;
}
return found ? target : -ENOENT;
}
static void nft_exthdr_ipv4_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
struct nft_exthdr *priv = nft_expr_priv(expr);
u32 *dest = &regs->data[priv->dreg];
struct sk_buff *skb = pkt->skb;
unsigned int offset;
int err;
if (skb->protocol != htons(ETH_P_IP))
goto err;
err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type);
if (priv->flags & NFT_EXTHDR_F_PRESENT) {
*dest = (err >= 0);
return;
} else if (err < 0) {
goto err;
}
offset += priv->offset;
dest[priv->len / NFT_REG32_SIZE] = 0;
if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
goto err;
return;
err:
regs->verdict.code = NFT_BREAK;
}
static void *
nft_tcp_header_pointer(const struct nft_pktinfo *pkt,
unsigned int len, void *buffer, unsigned int *tcphdr_len)
@ -312,6 +409,28 @@ static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx,
return nft_validate_register_load(priv->sreg, priv->len);
}
static int nft_exthdr_ipv4_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_exthdr *priv = nft_expr_priv(expr);
int err = nft_exthdr_init(ctx, expr, tb);
if (err < 0)
return err;
switch (priv->type) {
case IPOPT_SSRR:
case IPOPT_LSRR:
case IPOPT_RR:
case IPOPT_RA:
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int nft_exthdr_dump_common(struct sk_buff *skb, const struct nft_exthdr *priv)
{
if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type))
@ -358,6 +477,14 @@ static const struct nft_expr_ops nft_exthdr_ipv6_ops = {
.dump = nft_exthdr_dump,
};
static const struct nft_expr_ops nft_exthdr_ipv4_ops = {
.type = &nft_exthdr_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
.eval = nft_exthdr_ipv4_eval,
.init = nft_exthdr_ipv4_init,
.dump = nft_exthdr_dump,
};
static const struct nft_expr_ops nft_exthdr_tcp_ops = {
.type = &nft_exthdr_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
@ -398,6 +525,12 @@ nft_exthdr_select_ops(const struct nft_ctx *ctx,
if (tb[NFTA_EXTHDR_DREG])
return &nft_exthdr_ipv6_ops;
break;
case NFT_EXTHDR_OP_IPV4:
if (ctx->family != NFPROTO_IPV6) {
if (tb[NFTA_EXTHDR_DREG])
return &nft_exthdr_ipv4_ops;
}
break;
}
return ERR_PTR(-EOPNOTSUPP);
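
The ipv4_find_option() helper above leans on __ip_options_compile() to validate the header and record the per-option offsets. Purely as an illustration of what locating an option by type means, here is a self-contained userspace walk over the IPv4 options area; a simplified sketch that only handles END, NOOP and length-prefixed options, not the kernel path:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define IPOPT_END  0
#define IPOPT_NOOP 1
#define IPOPT_RA   148    /* Router Alert, RFC 2113 */

/* Walk the options area (bytes 20 .. ihl*4) of an IPv4 header and return
 * the offset of the first option of the requested type, or -1. */
static int find_ipv4_option(const uint8_t *hdr, size_t hdrlen, uint8_t target)
{
    size_t ihl = (hdr[0] & 0x0f) * 4;
    size_t off = 20;

    if (ihl < 20 || ihl > hdrlen)
        return -1;
    while (off < ihl) {
        uint8_t type = hdr[off];

        if (type == IPOPT_END)
            break;
        if (type == target)
            return (int)off;
        if (type == IPOPT_NOOP) {
            off++;
            continue;
        }
        if (off + 1 >= ihl || hdr[off + 1] < 2)
            return -1;    /* malformed option length */
        off += hdr[off + 1];
    }
    return -1;
}

int main(void)
{
    /* 24-byte header (IHL=6) carrying a 4-byte Router Alert option. */
    uint8_t hdr[24] = {
        0x46, 0x00, 0x00, 0x18,    /* version 4, IHL 6, total length 24 */
        0x00, 0x00, 0x00, 0x00,
        0x40, 0x02, 0x00, 0x00,    /* TTL 64, protocol IGMP, checksum 0 */
        0x0a, 0x00, 0x00, 0x01,    /* 10.0.0.1 */
        0xe0, 0x00, 0x00, 0x16,    /* 224.0.0.22 */
        0x94, 0x04, 0x00, 0x00,    /* Router Alert option */
    };

    printf("RA option at offset %d\n",
           find_ipv4_option(hdr, sizeof(hdr), IPOPT_RA));
    return 0;
}

It prints offset 20, right after the fixed 20-byte header; that is the kind of offset the evaluation function above adds priv->offset to before copying with skb_copy_bits().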

View File

@ -2,7 +2,7 @@
/*
* xt_iprange - Netfilter module to match IP address ranges
*
* (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* (C) 2003 Jozsef Kadlecsik <kadlec@netfilter.org>
* (C) CC Computer Consultants GmbH, 2008
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@ -130,7 +130,7 @@ static void __exit iprange_mt_exit(void)
module_init(iprange_mt_init);
module_exit(iprange_mt_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: arbitrary IPv4 range matching");
MODULE_ALIAS("ipt_iprange");

View File

@ -22,6 +22,9 @@ static int owner_check(const struct xt_mtchk_param *par)
struct xt_owner_match_info *info = par->matchinfo;
struct net *net = par->net;
if (info->match & ~XT_OWNER_MASK)
return -EINVAL;
/* Only allow the common case where the userns of the writer
* matches the userns of the network namespace.
*/

View File

@ -2,7 +2,7 @@
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
* Patrick Schaaf <bof@bof.de>
* Martin Josefsson <gandalf@wlug.westbo.se>
* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
*/
/* Kernel module which implements the set match and SET target
@ -18,7 +18,7 @@
#include <uapi/linux/netfilter/xt_set.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
MODULE_DESCRIPTION("Xtables: IP set match and target module");
MODULE_ALIAS("xt_SET");
MODULE_ALIAS("ipt_set");
@ -436,6 +436,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
{
const struct xt_set_info_target_v3 *info = par->targinfo;
ip_set_id_t index;
int ret = 0;
if (info->add_set.index != IPSET_INVALID_ID) {
index = ip_set_nfnl_get_byindex(par->net,
@ -453,17 +454,16 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
if (index == IPSET_INVALID_ID) {
pr_info_ratelimited("Cannot find del_set index %u as target\n",
info->del_set.index);
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net,
info->add_set.index);
return -ENOENT;
ret = -ENOENT;
goto cleanup_add;
}
}
if (info->map_set.index != IPSET_INVALID_ID) {
if (strncmp(par->table, "mangle", 7)) {
pr_info_ratelimited("--map-set only usable from mangle table\n");
return -EINVAL;
ret = -EINVAL;
goto cleanup_del;
}
if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) |
(info->flags & IPSET_FLAG_MAP_SKBQUEUE)) &&
@ -471,20 +471,16 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
1 << NF_INET_LOCAL_OUT |
1 << NF_INET_POST_ROUTING))) {
pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
return -EINVAL;
ret = -EINVAL;
goto cleanup_del;
}
index = ip_set_nfnl_get_byindex(par->net,
info->map_set.index);
if (index == IPSET_INVALID_ID) {
pr_info_ratelimited("Cannot find map_set index %u as target\n",
info->map_set.index);
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net,
info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net,
info->del_set.index);
return -ENOENT;
ret = -ENOENT;
goto cleanup_del;
}
}
@ -492,16 +488,21 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
info->del_set.dim > IPSET_DIM_MAX ||
info->map_set.dim > IPSET_DIM_MAX) {
pr_info_ratelimited("SET target dimension over the limit!\n");
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->del_set.index);
if (info->map_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->map_set.index);
return -ERANGE;
ret = -ERANGE;
goto cleanup_mark;
}
return 0;
cleanup_mark:
if (info->map_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->map_set.index);
cleanup_del:
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->del_set.index);
cleanup_add:
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
return ret;
}
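
The reworked error handling above replaces the duplicated ip_set_nfnl_put() calls on each failure branch with a single unwind ladder: each failure jumps to the label that releases exactly what has already been acquired, in reverse order. The general shape of that idiom, reduced to a standalone sketch with made-up resource names:

#include <stdio.h>
#include <string.h>

static int acquire(const char *what)
{
    if (strcmp(what, "map_set") == 0)
        return -1;    /* simulate the third lookup failing */
    printf("acquired %s\n", what);
    return 0;
}

static void release(const char *what)
{
    printf("released %s\n", what);
}

/* Each error path releases only what was already acquired, in reverse order. */
static int setup(void)
{
    int ret;

    ret = acquire("add_set");
    if (ret)
        return ret;

    ret = acquire("del_set");
    if (ret)
        goto cleanup_add;

    ret = acquire("map_set");
    if (ret)
        goto cleanup_del;

    return 0;

cleanup_del:
    release("del_set");
cleanup_add:
    release("add_set");
    return ret;
}

int main(void)
{
    return setup() ? 1 : 0;
}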
static void