Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following patchset contains Netfilter updates for net-next,
they are:

1) Don't use userspace datatypes in bridge netfilter code, from
   Tobin Harding.

2) Iterate only once over the expectation table when removing the
   helper module, instead of once per-netns, from Florian Westphal.

3) Extra sanitization in xt_hook_ops_alloc() to return an error in case
   we ever pass zero hooks to it.
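
   A minimal sketch of the added check (not necessarily the exact
   in-tree code), assuming the hook count is derived from the table's
   valid_hooks mask:

       unsigned int hook_mask = table->valid_hooks;
       uint8_t num_hooks = hweight32(hook_mask);

       if (!num_hooks)         /* no hooks at all: refuse instead of
                                * returning a zero-sized ops array */
               return ERR_PTR(-EINVAL);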

4) Handle NFPROTO_INET from the logging core infrastructure, from
   Liping Zhang.
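
   NFPROTO_INET has no logger slot of its own, so the idea is to fan the
   request out to the IPv4 and IPv6 families. A hedged sketch of what
   the NFPROTO_INET branch in nf_logger_find_get() looks like (error
   handling simplified):

       if (pf == NFPROTO_INET) {
               /* no real per-family slot for NFPROTO_INET: take a
                * reference on both the IPv4 and the IPv6 logger */
               ret = nf_logger_find_get(NFPROTO_IPV4, type);
               if (ret < 0)
                       return ret;

               ret = nf_logger_find_get(NFPROTO_IPV6, type);
               if (ret < 0)
                       nf_logger_put(NFPROTO_IPV4, type);
               return ret;
       }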

5) Autoload loggers when the TRACE target is used from rules; this doesn't
   change the behaviour in case the user already selected nfnetlink_log as
   the preferred way to print tracing logs. Also from Liping Zhang.
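
   A minimal sketch of how such autoloading is typically wired up from a
   target's checkentry/destroy hooks, reusing the existing
   nf_logger_find_get()/nf_logger_put() API (function names here are
   illustrative):

       static int trace_tg_check(const struct xt_tgchk_param *par)
       {
               /* request_module()s a backend logger if none is
                * registered yet, so TRACE rules actually produce output */
               return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
       }

       static void trace_tg_destroy(const struct xt_tgdtor_param *par)
       {
               nf_logger_put(par->family, NF_LOG_TYPE_LOG);
       }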

6) Create the conntrack slab with SLAB_HWCACHE_ALIGN to allow rearranging
   fields by cache lines; this increases the size of each entry by 11%.
   From Florian Westphal.
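
   For reference, a hedged sketch of the slab creation with the alignment
   flag included (the other arguments are shown for illustration only):

       nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
                                               sizeof(struct nf_conn), 0,
                                               SLAB_DESTROY_BY_RCU |
                                               SLAB_HWCACHE_ALIGN,
                                               NULL);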

7) Skip zone comparison if CONFIG_NF_CONNTRACK_ZONES=n, from Florian.

8) Remove useless defensive check in nf_logger_find_get() from Shivani
   Bhardwaj.

9) Remove the zone extension and place the zone in the conntrack object
   instead; it is always included in the hashing and we expect more
   intensive use of zones now that containers are in place. Also from
   Florian Westphal.

10) Owner match now works from any namespace, from Eric Bierdeman.

11) Make sure we only reply with TCP reset to TCP traffic from
    nf_reject_ipv4, patch from Liping Zhang.
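
    Conceptually the guard is a one-liner; a hedged sketch of the check
    nf_reject_ipv4 needs before building the RST (oldskb being the packet
    that is rejected):

        /* only ever answer TCP packets with a TCP reset */
        if (ip_hdr(oldskb)->protocol != IPPROTO_TCP)
                return;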

12) Introduce --nflog-size to indicate the amount of network packet bytes
    that are copied to userspace via the log message, from Vishwanath Pai.
    This obsoletes --nflog-range, which was designed for this purpose but
    has never worked.
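
    On the kernel side this maps onto the new copy_len/NF_LOG_F_COPY_LEN
    fields visible in the nf_loginfo hunk further below. A hedged sketch
    of how the NFLOG target fills them in (info being the target's
    xt_nflog_info):

        struct nf_loginfo li = {
                .type = NF_LOG_TYPE_ULOG,
                .u.ulog = {
                        .copy_len   = info->len,        /* --nflog-size */
                        .group      = info->group,
                        .qthreshold = info->threshold,
                        .flags      = NF_LOG_F_COPY_LEN,
                },
        };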

13) Introduce generic macros for nf_tables object generation masks.

14) Use the generation mask in table, chain and set objects in nf_tables.
    This fixes interference between the preparation phase of the commit
    protocol and object listings going on at the same time. This update
    is introduced in three patches, one per object.

15) Check if the object is active in the next generation when deactivating
    elements in the rbtree implementation; given that deactivation happens
    from the commit phase path, we have to observe the future status of
    the object.
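
    A hedged sketch of the idea, written against the set-element activity
    helper found in current trees (nft_set_elem_active(); the element and
    variable names are assumptions):

        /* deactivation runs from the transaction path, so look at the
         * element's status in the *next* generation, not the current one */
        if (!nft_set_elem_active(&rbe->ext, nft_genmask_next(net)))
                continue;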

16) Support for deletion of just-added elements in the hash set type.

17) Allow resizing the conntrack hashtable via the /proc entry, not only
    via the obscure /sys entry that maps to the module parameter, from
    Florian Westphal.
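
    A hedged sketch of the /proc side, reusing the nf_conntrack_hash_resize()
    helper exported in the nf_conntrack.h hunk below (the handler name and
    the way the new value is picked up are illustrative):

        static int
        nf_conntrack_hash_sysctl(struct ctl_table *table, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *ppos)
        {
                int ret;

                ret = proc_dointvec(table, write, buffer, lenp, ppos);
                if (ret < 0 || !write)
                        return ret;

                /* same resize path as the hashsize module parameter,
                 * now reachable via /proc as well */
                return nf_conntrack_hash_resize(nf_conntrack_htable_size);
        }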

18) Get rid of NFT_BASECHAIN_DISABLED; this code is not exercised anymore
    since we tear down the ruleset whenever the netdevice goes away.

19) Support for matching inverted set lookups, from Arturo Borrero.

20) Simplify iptable_mangle_hook() by removing a superfluous branch.

21) Introduce ether_addr_equal_masked() and use it from the netfilter
    codebase, from Joe Perches.
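
    The helper is small enough to quote; roughly (modulo the exact
    in-tree formatting) it boils down to:

        static inline bool ether_addr_equal_masked(const u8 *addr1,
                                                   const u8 *addr2,
                                                   const u8 *mask)
        {
                int i;

                for (i = 0; i < ETH_ALEN; i++) {
                        if ((addr1[i] ^ addr2[i]) & mask[i])
                                return false;
                }
                return true;
        }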

22) Remove references to "Use netfilter MARK value as routing key" from
    the Netfilter Kconfig description, given that this option has not
    existed for 10 years, from Moritz Sichert.

23) Introduce generic NF_INVF() and use it from the xtables codebase,
    from Joe Perches.
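
    NF_INVF() folds the usual "result XOR invert-flag" pattern into one
    place; a hedged sketch of the macro and a typical xtables call site
    (the ip_packet_match()-style usage is illustrative):

        #define NF_INVF(ptr, flag, boolean) \
                ((boolean) ^ !!((ptr)->invflags & (flag)))

        /* instead of open-coding "ret ^ !!(ipinfo->invflags & IPT_INV_VIA_IN)" */
        if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
                return false;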

24) Setting the logger to NONE via /proc was not working unless explicit
    nul-termination was included in the string. This fix keeps the former
    behaviour so we don't break backward compatibility.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller
2016-07-06 09:15:15 -07:00
40 changed files with 699 additions and 532 deletions

View File

@@ -85,6 +85,9 @@ struct nf_conn {
spinlock_t lock;
u16 cpu;
#ifdef CONFIG_NF_CONNTRACK_ZONES
struct nf_conntrack_zone zone;
#endif
/* XXX should I move this to the tail ? - Y.K */
/* These are my tuples; original and reply */
struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];
@@ -287,6 +290,7 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
struct kernel_param;
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
int nf_conntrack_hash_resize(unsigned int hashsize);
extern unsigned int nf_conntrack_htable_size;
extern unsigned int nf_conntrack_max;

View File

@@ -15,9 +15,6 @@ enum nf_ct_ext_id {
#ifdef CONFIG_NF_CONNTRACK_EVENTS
NF_CT_EXT_ECACHE,
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
NF_CT_EXT_ZONE,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
NF_CT_EXT_TSTAMP,
#endif
@@ -38,7 +35,6 @@ enum nf_ct_ext_id {
#define NF_CT_EXT_SEQADJ_TYPE struct nf_conn_seqadj
#define NF_CT_EXT_ACCT_TYPE struct nf_conn_acct
#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
#define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone
#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout
#define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels

View File

@@ -9,12 +9,11 @@
static inline const struct nf_conntrack_zone *
nf_ct_zone(const struct nf_conn *ct)
{
const struct nf_conntrack_zone *nf_ct_zone = NULL;
#ifdef CONFIG_NF_CONNTRACK_ZONES
nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
return &ct->zone;
#else
return &nf_ct_zone_dflt;
#endif
return nf_ct_zone ? nf_ct_zone : &nf_ct_zone_dflt;
}
static inline const struct nf_conntrack_zone *
@@ -31,32 +30,22 @@ static inline const struct nf_conntrack_zone *
nf_ct_zone_tmpl(const struct nf_conn *tmpl, const struct sk_buff *skb,
struct nf_conntrack_zone *tmp)
{
const struct nf_conntrack_zone *zone;
#ifdef CONFIG_NF_CONNTRACK_ZONES
if (!tmpl)
return &nf_ct_zone_dflt;
zone = nf_ct_zone(tmpl);
if (zone->flags & NF_CT_FLAG_MARK)
zone = nf_ct_zone_init(tmp, skb->mark, zone->dir, 0);
return zone;
if (tmpl->zone.flags & NF_CT_FLAG_MARK)
return nf_ct_zone_init(tmp, skb->mark, tmpl->zone.dir, 0);
#endif
return nf_ct_zone(tmpl);
}
static inline int nf_ct_zone_add(struct nf_conn *ct, gfp_t flags,
const struct nf_conntrack_zone *info)
static inline void nf_ct_zone_add(struct nf_conn *ct,
const struct nf_conntrack_zone *zone)
{
#ifdef CONFIG_NF_CONNTRACK_ZONES
struct nf_conntrack_zone *nf_ct_zone;
nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, flags);
if (!nf_ct_zone)
return -ENOMEM;
nf_ct_zone_init(nf_ct_zone, info->id, info->dir,
info->flags);
ct->zone = *zone;
#endif
return 0;
}
static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone,
@@ -68,22 +57,34 @@ static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone,
static inline u16 nf_ct_zone_id(const struct nf_conntrack_zone *zone,
enum ip_conntrack_dir dir)
{
#ifdef CONFIG_NF_CONNTRACK_ZONES
return nf_ct_zone_matches_dir(zone, dir) ?
zone->id : NF_CT_DEFAULT_ZONE_ID;
#else
return NF_CT_DEFAULT_ZONE_ID;
#endif
}
static inline bool nf_ct_zone_equal(const struct nf_conn *a,
const struct nf_conntrack_zone *b,
enum ip_conntrack_dir dir)
{
#ifdef CONFIG_NF_CONNTRACK_ZONES
return nf_ct_zone_id(nf_ct_zone(a), dir) ==
nf_ct_zone_id(b, dir);
#else
return true;
#endif
}
static inline bool nf_ct_zone_equal_any(const struct nf_conn *a,
const struct nf_conntrack_zone *b)
{
#ifdef CONFIG_NF_CONNTRACK_ZONES
return nf_ct_zone(a)->id == b->id;
#else
return true;
#endif
}
#endif /* IS_ENABLED(CONFIG_NF_CONNTRACK) */
#endif /* _NF_CONNTRACK_ZONES_H */
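
For context, a hedged sketch of a typical caller on the conntrack lookup path
(simplified; the in-tree key comparison may also check the network namespace).
With the zone now embedded in struct nf_conn, and compiled out entirely when
CONFIG_NF_CONNTRACK_ZONES=n, the zone comparison below reduces to a constant:

    static bool example_key_equal(const struct nf_conntrack_tuple_hash *h,
                                  const struct nf_conntrack_tuple *tuple,
                                  const struct nf_conntrack_zone *zone)
    {
            const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

            return nf_ct_tuple_equal(tuple, &h->tuple) &&
                   nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
                   nf_ct_is_confirmed(ct);
    }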

View File

@@ -12,6 +12,9 @@
#define NF_LOG_UID 0x08 /* Log UID owning local socket */
#define NF_LOG_MASK 0x0f
/* This flag indicates that copy_len field in nf_loginfo is set */
#define NF_LOG_F_COPY_LEN 0x1
enum nf_log_type {
NF_LOG_TYPE_LOG = 0,
NF_LOG_TYPE_ULOG,
@@ -22,9 +25,13 @@ struct nf_loginfo {
u_int8_t type;
union {
struct {
/* copy_len will be used iff you set
* NF_LOG_F_COPY_LEN in flags
*/
u_int32_t copy_len;
u_int16_t group;
u_int16_t qthreshold;
u_int16_t flags;
} ulog;
struct {
u_int8_t level;

View File

@@ -297,6 +297,7 @@ void nft_unregister_set(struct nft_set_ops *ops);
* @ops: set ops
* @pnet: network namespace
* @flags: set flags
* @genmask: generation mask
* @klen: key length
* @dlen: data length
* @data: private set data
@@ -318,7 +319,8 @@ struct nft_set {
/* runtime data below here */
const struct nft_set_ops *ops ____cacheline_aligned;
possible_net_t pnet;
u16 flags;
u16 flags:14,
genmask:2;
u8 klen;
u8 dlen;
unsigned char data[]
@@ -336,9 +338,9 @@ static inline struct nft_set *nft_set_container_of(const void *priv)
}
struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
const struct nlattr *nla);
const struct nlattr *nla, u8 genmask);
struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
const struct nlattr *nla);
const struct nlattr *nla, u8 genmask);
static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
{
@@ -733,7 +735,6 @@ static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
enum nft_chain_flags {
NFT_BASE_CHAIN = 0x1,
NFT_CHAIN_INACTIVE = 0x2,
};
/**
@@ -755,7 +756,8 @@ struct nft_chain {
u64 handle;
u32 use;
u16 level;
u8 flags;
u8 flags:6,
genmask:2;
char name[NFT_CHAIN_MAXNAMELEN];
};
@@ -797,7 +799,6 @@ struct nft_stats {
};
#define NFT_HOOK_OPS_MAX 2
#define NFT_BASECHAIN_DISABLED (1 << 0)
/**
* struct nft_base_chain - nf_tables base chain
@@ -839,6 +840,7 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
* @hgenerator: handle generator state
* @use: number of chain references to this table
* @flags: table flag (see enum nft_table_flags)
* @genmask: generation mask
* @name: name of the table
*/
struct nft_table {
@@ -847,7 +849,8 @@ struct nft_table {
struct list_head sets;
u64 hgenerator;
u32 use;
u16 flags;
u16 flags:14,
genmask:2;
char name[NFT_TABLE_MAXNAMELEN];
};
@@ -970,6 +973,32 @@ static inline u8 nft_genmask_cur(const struct net *net)
#define NFT_GENMASK_ANY ((1 << 0) | (1 << 1))
/*
* Generic transaction helpers
*/
/* Check if this object is currently active. */
#define nft_is_active(__net, __obj) \
(((__obj)->genmask & nft_genmask_cur(__net)) == 0)
/* Check if this object is active in the next generation. */
#define nft_is_active_next(__net, __obj) \
(((__obj)->genmask & nft_genmask_next(__net)) == 0)
/* This object becomes active in the next generation. */
#define nft_activate_next(__net, __obj) \
(__obj)->genmask = nft_genmask_cur(__net)
/* This object becomes inactive in the next generation. */
#define nft_deactivate_next(__net, __obj) \
(__obj)->genmask = nft_genmask_next(__net)
/* After committing the ruleset, clear the stale generation bit. */
#define nft_clear(__net, __obj) \
(__obj)->genmask &= ~nft_genmask_next(__net)
#define nft_active_genmask(__obj, __genmask) \
!((__obj)->genmask & __genmask)
/*
* Set element transaction helpers
*/
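
To illustrate the intended usage of the helpers above (the example_* wrappers
are hypothetical, not in-tree functions): objects added in a transaction only
become active in the next generation, and readers filter on the generation
they currently observe, which is what keeps listings from interfering with a
commit that is being prepared at the same time.

    static void example_add_table(struct net *net, struct nft_af_info *afi,
                                  struct nft_table *table)
    {
            nft_activate_next(net, table);  /* visible only once committed */
            list_add_tail_rcu(&table->list, &afi->tables);
    }

    static bool example_dump_skip(const struct net *net,
                                  const struct nft_table *table)
    {
            /* listings only show objects active in the current generation */
            return !nft_is_active(net, table);
    }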