netfilter: nftables_offload: VLAN id needs host byteorder in flow dissector

The flow dissector representation expects the VLAN id in host byteorder.
Add the NFT_OFFLOAD_F_NETWORK2HOST flag so that nft_cmp converts the value and
mask from network to host byteorder before loading them into the flow dissector key.

Fixes: a82055af59 ("netfilter: nft_payload: add VLAN offload support")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Pablo Neira Ayuso 2021-04-12 14:20:15 +02:00
parent 14c20643ef
commit ff4d90a89d
3 changed files with 55 additions and 7 deletions
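
For context (not part of the commit): the 802.1Q TCI is carried on the wire in
network byteorder, while the flow dissector's VLAN key stores the id as a plain
host-order value, so it has to be byteswapped on the offload path. A minimal
userspace sketch of the mismatch, using only standard ntohs()/htons(); the
VLAN_VID_MASK define here simply mirrors the kernel's 12-bit id mask:

/* Illustrative userspace sketch, not kernel code from this commit. */
#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>

#define VLAN_VID_MASK	0x0fff	/* low 12 bits of the TCI hold the VLAN id */

int main(void)
{
	uint16_t tci_wire = htons(100);	/* TCI for VLAN id 100, network order */

	/* What the flow dissector key expects: the id in host byteorder. */
	uint16_t vid_host = ntohs(tci_wire) & VLAN_VID_MASK;

	/* Without the swap, a little-endian host would read 0x6400 instead. */
	assert(vid_host == 100);
	return 0;
}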

include/net/netfilter/nf_tables_offload.h

@@ -4,11 +4,16 @@
 #include <net/flow_offload.h>
 #include <net/netfilter/nf_tables.h>
 
+enum nft_offload_reg_flags {
+	NFT_OFFLOAD_F_NETWORK2HOST	= (1 << 0),
+};
+
 struct nft_offload_reg {
 	u32		key;
 	u32		len;
 	u32		base_offset;
 	u32		offset;
+	u32		flags;
 	struct nft_data data;
 	struct nft_data mask;
 };
@@ -72,13 +77,17 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rul
 void nft_flow_rule_destroy(struct nft_flow_rule *flow);
 int nft_flow_rule_offload_commit(struct net *net);
 
-#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)	\
+#define NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, __flags)	\
 	(__reg)->base_offset	=					\
 		offsetof(struct nft_flow_key, __base);			\
 	(__reg)->offset		=					\
 		offsetof(struct nft_flow_key, __base.__field);		\
 	(__reg)->len		= __len;				\
 	(__reg)->key		= __key;				\
+	(__reg)->flags		= __flags;
+
+#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)	\
+	NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, 0)
 
 #define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg)	\
 	NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)		\

net/netfilter/nft_cmp.c

@@ -114,19 +114,56 @@ nla_put_failure:
 	return -1;
 }
 
+union nft_cmp_offload_data {
+	u16	val16;
+	u32	val32;
+	u64	val64;
+};
+
+static void nft_payload_n2h(union nft_cmp_offload_data *data,
+			    const u8 *val, u32 len)
+{
+	switch (len) {
+	case 2:
+		data->val16 = ntohs(*((u16 *)val));
+		break;
+	case 4:
+		data->val32 = ntohl(*((u32 *)val));
+		break;
+	case 8:
+		data->val64 = be64_to_cpu(*((u64 *)val));
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		break;
+	}
+}
+
 static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
 			     struct nft_flow_rule *flow,
 			     const struct nft_cmp_expr *priv)
 {
 	struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
+	union nft_cmp_offload_data _data, _datamask;
 	u8 *mask = (u8 *)&flow->match.mask;
 	u8 *key = (u8 *)&flow->match.key;
+	u8 *data, *datamask;
 
 	if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
 		return -EOPNOTSUPP;
 
-	memcpy(key + reg->offset, &priv->data, reg->len);
-	memcpy(mask + reg->offset, &reg->mask, reg->len);
+	if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
+		nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
+		nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
+		data = (u8 *)&_data;
+		datamask = (u8 *)&_datamask;
+	} else {
+		data = (u8 *)&priv->data;
+		datamask = (u8 *)&reg->mask;
+	}
+
+	memcpy(key + reg->offset, data, reg->len);
+	memcpy(mask + reg->offset, datamask, reg->len);
 
 	flow->match.dissector.used_keys |= BIT(reg->key);
 	flow->match.dissector.offset[reg->key] = reg->base_offset;
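
A standalone model of the 2/4/8-byte conversions nft_payload_n2h() performs,
written against userspace byteorder helpers (assumption: glibc <endian.h>; the
kernel helper uses ntohs/ntohl/be64_to_cpu and hits WARN_ON_ONCE() for any
other length):

/* Userspace sketch of the n2h conversion, illustrative only. */
#include <assert.h>
#include <endian.h>
#include <stdint.h>
#include <string.h>

static uint64_t n2h(const uint8_t *val, uint32_t len)
{
	uint16_t v16;
	uint32_t v32;
	uint64_t v64;

	switch (len) {
	case 2:
		memcpy(&v16, val, 2);
		return be16toh(v16);
	case 4:
		memcpy(&v32, val, 4);
		return be32toh(v32);
	case 8:
		memcpy(&v64, val, 8);
		return be64toh(v64);
	default:	/* the kernel helper warns here */
		return 0;
	}
}

int main(void)
{
	const uint8_t tci[2]  = { 0x00, 0x64 };			/* VLAN id 100 */
	const uint8_t addr[4] = { 0xc0, 0x00, 0x02, 0x01 };	/* 192.0.2.1 */

	assert(n2h(tci, 2) == 100);
	assert(n2h(addr, 4) == 0xc0000201UL);
	return 0;
}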

net/netfilter/nft_payload.c

@@ -226,8 +226,9 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
 		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 			return -EOPNOTSUPP;
 
-		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
-				  vlan_tci, sizeof(__be16), reg);
+		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
+					vlan_tci, sizeof(__be16), reg,
+					NFT_OFFLOAD_F_NETWORK2HOST);
 		break;
 	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
 		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
@@ -241,8 +242,9 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
 		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
 			return -EOPNOTSUPP;
 
-		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
-				  vlan_tci, sizeof(__be16), reg);
+		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
+					vlan_tci, sizeof(__be16), reg,
+					NFT_OFFLOAD_F_NETWORK2HOST);
 		break;
 	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
 						sizeof(struct vlan_hdr):
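
For completeness, a hedged sketch of the consumer side (not part of this
patch): a driver parsing the offloaded rule reads the VLAN id back through
flow_rule_match_vlan(), and struct flow_dissector_key_vlan stores vlan_id as a
host-order bitfield, which is why the conversion above is needed. The helper
name example_parse_vlan is made up for illustration.

/* Hypothetical driver-side consumer, for illustration only. */
#include <linux/errno.h>
#include <net/flow_offload.h>

static int example_parse_vlan(const struct flow_rule *rule, u16 *vid)
{
	struct flow_match_vlan match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN))
		return -EINVAL;

	flow_rule_match_vlan(rule, &match);

	/* vlan_id arrives here already in host byteorder. */
	*vid = match.key->vlan_id & match.mask->vlan_id;
	return 0;
}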