Merge branch 'ice-pfcp-filter'

Alexander Lobakin says:

====================
ice: add PFCP filter support

Add support for creating PFCP filters in switchdev mode. Add a pfcp
module that allows creating a PFCP-type netdev. The netdev can then be
passed to tc when creating a filter to indicate that a PFCP filter
should be created.

To add a PFCP filter, a special netdev must be created and passed to
the tc command:

  ip link add pfcp0 type pfcp
  tc filter add dev eth0 ingress prio 1 flower pfcp_opts \
    1:12ab/ff:fffffffffffffff0 skip_hw action mirred egress redirect \
    dev pfcp0

The iproute2 changes [1] are required to use pfcp_opts in tc; the
option takes a key/mask pair of PFCP message type and SEID, as in the
example above (type:seid/type_mask:seid_mask).

The ICE COMMS package is required, as it contains the PFCP profiles.

Part of this patchset modifies the IP_TUNNEL_*_OPT flags, which were
previously stored in a __be16. All 16 possible bits have already been
used, making it impossible to add new ones.
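
As an illustration of what the conversion means for driver code (a
sketch only, not a hunk from this series; enable_csum() is a made-up
placeholder), flag handling moves from __be16 arithmetic to the bitmap
accessors:

  /* Before: flags packed into a __be16 -- all 16 bits already taken. */
  __be16 flags = TUNNEL_KEY | TUNNEL_CSUM;

  if (flags & TUNNEL_CSUM)
          enable_csum();

  /* After: flags live in a bitmap, so new bits such as
   * IP_TUNNEL_PFCP_OPT_BIT still fit.
   */
  IP_TUNNEL_DECLARE_FLAGS(flags) = { };

  __set_bit(IP_TUNNEL_KEY_BIT, flags);
  __set_bit(IP_TUNNEL_CSUM_BIT, flags);

  if (test_bit(IP_TUNNEL_CSUM_BIT, flags))
          enable_csum();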

* 1-3: add the new bitmap_{read,write}(), which are used later in the IP
       tunnel flags code (from Alexander's ARM64 MTE series [2]);
* 4-14: some bitmap code preparations also used later in IP tunnels;
* 15-17: convert IP tunnel flags from __be16 to a bitmap;
* 18-21: add PFCP module and support for it in ice.

[1] https://lore.kernel.org/netdev/20230614091758.11180-1-marcin.szycik@linux.intel.com
[2] https://lore.kernel.org/linux-kernel/20231218124033.551770-1-glider@google.com
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Merged by David S. Miller, 2024-04-01 10:49:29 +01:00
commit d823265dd4
78 changed files with 1984 additions and 620 deletions


@ -465,11 +465,6 @@ static void __destroy_persistent_data_structures(struct dm_clone_metadata *cmd)
/*---------------------------------------------------------------------------*/
static size_t bitmap_size(unsigned long nr_bits)
{
return BITS_TO_LONGS(nr_bits) * sizeof(long);
}
static int __dirty_map_init(struct dirty_map *dmap, unsigned long nr_words,
unsigned long nr_regions)
{


@ -290,6 +290,19 @@ config GTP
To compile this driver as a module, choose M here: the module
will be called gtp.
config PFCP
tristate "Packet Forwarding Control Protocol (PFCP)"
depends on INET
select NET_UDP_TUNNEL
help
This allows one to create PFCP virtual interfaces, which can be used to
set up software and hardware offload of PFCP packets.
Note that this module does not implement the PFCP protocol in kernel
space; there is no support for parsing any PFCP messages.
To compile this driver as a module, choose M here: the module
will be called pfcp.
config AMT
tristate "Automatic Multicast Tunneling (AMT)"
depends on INET && IP_MULTICAST


@ -38,6 +38,7 @@ obj-$(CONFIG_GENEVE) += geneve.o
obj-$(CONFIG_BAREUDP) += bareudp.o
obj-$(CONFIG_GTP) += gtp.o
obj-$(CONFIG_NLMON) += nlmon.o
obj-$(CONFIG_PFCP) += pfcp.o
obj-$(CONFIG_NET_VRF) += vrf.o
obj-$(CONFIG_VSOCKMON) += vsockmon.o
obj-$(CONFIG_MHI_NET) += mhi_net.o


@ -61,6 +61,7 @@ struct bareudp_dev {
static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct metadata_dst *tun_dst = NULL;
IP_TUNNEL_DECLARE_FLAGS(key) = { };
struct bareudp_dev *bareudp;
unsigned short family;
unsigned int len;
@ -137,7 +138,10 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
bareudp->dev->stats.rx_dropped++;
goto drop;
}
tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
__set_bit(IP_TUNNEL_KEY_BIT, key);
tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0);
if (!tun_dst) {
bareudp->dev->stats.rx_dropped++;
goto drop;
@ -285,10 +289,10 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct bareudp_dev *bareudp,
const struct ip_tunnel_info *info)
{
bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct socket *sock = rcu_dereference(bareudp->sock);
bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
const struct ip_tunnel_key *key = &info->key;
struct rtable *rt;
__be16 sport, df;
@ -316,7 +320,8 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ?
htons(IP_DF) : 0;
skb_scrub_packet(skb, xnet);
err = -ENOSPC;
@ -338,7 +343,8 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
tos, ttl, df, sport, bareudp->port,
!net_eq(bareudp->net, dev_net(bareudp->dev)),
!(info->key.tun_flags & TUNNEL_CSUM));
!test_bit(IP_TUNNEL_CSUM_BIT,
info->key.tun_flags));
return 0;
free_dst:
@ -350,10 +356,10 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct bareudp_dev *bareudp,
const struct ip_tunnel_info *info)
{
bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct socket *sock = rcu_dereference(bareudp->sock);
bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
const struct ip_tunnel_key *key = &info->key;
struct dst_entry *dst = NULL;
struct in6_addr saddr, daddr;
@ -402,7 +408,8 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
&saddr, &daddr, prio, ttl,
info->key.label, sport, bareudp->port,
!(info->key.tun_flags & TUNNEL_CSUM));
!test_bit(IP_TUNNEL_CSUM_BIT,
info->key.tun_flags));
return 0;
free_dst:


@ -721,6 +721,12 @@ static bool ice_is_gtp_c_profile(u16 prof_idx)
}
}
static bool ice_is_pfcp_profile(u16 prof_idx)
{
return prof_idx >= ICE_PROFID_IPV4_PFCP_NODE &&
prof_idx <= ICE_PROFID_IPV6_PFCP_SESSION;
}
/**
* ice_get_sw_prof_type - determine switch profile type
* @hw: pointer to the HW structure
@ -738,6 +744,9 @@ static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw,
if (ice_is_gtp_u_profile(prof_idx))
return ICE_PROF_TUN_GTPU;
if (ice_is_pfcp_profile(prof_idx))
return ICE_PROF_TUN_PFCP;
for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&


@ -93,6 +93,7 @@ enum ice_tunnel_type {
TNL_GRETAP,
TNL_GTPC,
TNL_GTPU,
TNL_PFCP,
__TNL_TYPE_CNT,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
@ -358,7 +359,8 @@ enum ice_prof_type {
ICE_PROF_TUN_GRE = 0x4,
ICE_PROF_TUN_GTPU = 0x8,
ICE_PROF_TUN_GTPC = 0x10,
ICE_PROF_TUN_ALL = 0x1E,
ICE_PROF_TUN_PFCP = 0x20,
ICE_PROF_TUN_ALL = 0x3E,
ICE_PROF_ALL = 0xFF,
};


@ -43,6 +43,7 @@ enum ice_protocol_type {
ICE_NVGRE,
ICE_GTP,
ICE_GTP_NO_PAY,
ICE_PFCP,
ICE_PPPOE,
ICE_L2TPV3,
ICE_VLAN_EX,
@ -61,6 +62,7 @@ enum ice_sw_tunnel_type {
ICE_SW_TUN_NVGRE,
ICE_SW_TUN_GTPU,
ICE_SW_TUN_GTPC,
ICE_SW_TUN_PFCP,
ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
};
@ -202,6 +204,15 @@ struct ice_udp_gtp_hdr {
u8 rsvrd;
};
struct ice_pfcp_hdr {
u8 flags;
u8 msg_type;
__be16 length;
__be64 seid;
__be32 seq;
u8 spare;
} __packed __aligned(__alignof__(u16));
struct ice_pppoe_hdr {
u8 rsrvd_ver_type;
u8 rsrvd_code;
@ -418,6 +429,7 @@ union ice_prot_hdr {
struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre_hdr nvgre_hdr;
struct ice_udp_gtp_hdr gtp_hdr;
struct ice_pfcp_hdr pfcp_hdr;
struct ice_pppoe_hdr pppoe_hdr;
struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr;
struct ice_hw_metadata metadata;


@ -42,6 +42,7 @@ enum {
ICE_PKT_KMALLOC = BIT(9),
ICE_PKT_PPPOE = BIT(10),
ICE_PKT_L2TPV3 = BIT(11),
ICE_PKT_PFCP = BIT(12),
};
struct ice_dummy_pkt_offsets {
@ -1110,6 +1111,77 @@ ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
0x00, 0x00,
};
ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv4) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_UDP_ILOS, 34 },
{ ICE_PFCP, 42 },
{ ICE_PROTOCOL_LAST, 0 },
};
ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv4) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x08, 0x00, /* ICE_ETYPE_OL 12 */
0x45, 0x00, 0x00, 0x2c, /* ICE_IPV4_OFOS 14 */
0x00, 0x01, 0x00, 0x00,
0x00, 0x11, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 34 */
0x00, 0x18, 0x00, 0x00,
0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 42 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV6_OFOS, 14 },
{ ICE_UDP_ILOS, 54 },
{ ICE_PFCP, 62 },
{ ICE_PROTOCOL_LAST, 0 },
};
ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x86, 0xdd, /* ICE_ETYPE_OL 12 */
0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
0x00, 0x10, 0x11, 0x00, /* Next header UDP */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 54 */
0x00, 0x18, 0x00, 0x00,
0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 62 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
@ -1343,6 +1415,8 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
ICE_PKT_PROFILE(pfcp_session_ipv6, ICE_PKT_PFCP | ICE_PKT_OUTER_IPV6),
ICE_PKT_PROFILE(pfcp_session_ipv4, ICE_PKT_PFCP),
ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
ICE_PKT_INNER_UDP),
ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
@ -4532,6 +4606,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6),
ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22),
ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14),
ICE_PROTOCOL_ENTRY(ICE_PFCP, 8, 10, 12, 14, 16, 18, 20, 22),
ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6),
ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10),
ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0),
@ -4565,6 +4640,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_NVGRE, ICE_GRE_OF_HW },
{ ICE_GTP, ICE_UDP_OF_HW },
{ ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
{ ICE_PFCP, ICE_UDP_ILOS_HW },
{ ICE_PPPOE, ICE_PPPOE_HW },
{ ICE_L2TPV3, ICE_L2TPV3_HW },
{ ICE_VLAN_EX, ICE_VLAN_OF_HW },
@ -5272,6 +5348,9 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
case ICE_SW_TUN_GTPC:
prof_type = ICE_PROF_TUN_GTPC;
break;
case ICE_SW_TUN_PFCP:
prof_type = ICE_PROF_TUN_PFCP;
break;
case ICE_SW_TUN_AND_NON_TUN:
default:
prof_type = ICE_PROF_ALL;
@ -5556,6 +5635,9 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_SW_TUN_VXLAN:
match |= ICE_PKT_TUN_UDP;
break;
case ICE_SW_TUN_PFCP:
match |= ICE_PKT_PFCP;
break;
default:
break;
}
@ -5696,6 +5778,9 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_GTP:
len = sizeof(struct ice_udp_gtp_hdr);
break;
case ICE_PFCP:
len = sizeof(struct ice_pfcp_hdr);
break;
case ICE_PPPOE:
len = sizeof(struct ice_pppoe_hdr);
break;


@ -22,6 +22,8 @@
#define ICE_PROFID_IPV6_GTPC_NO_TEID 45
#define ICE_PROFID_IPV6_GTPU_TEID 46
#define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70
#define ICE_PROFID_IPV4_PFCP_NODE 79
#define ICE_PROFID_IPV6_PFCP_SESSION 82
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))


@ -35,7 +35,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
lkups_cnt++;
if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS)
lkups_cnt++;
if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS)
lkups_cnt++;
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
@ -138,6 +141,8 @@ ice_proto_type_from_tunnel(enum ice_tunnel_type type)
return ICE_GTP;
case TNL_GTPC:
return ICE_GTP_NO_PAY;
case TNL_PFCP:
return ICE_PFCP;
default:
return 0;
}
@ -157,6 +162,8 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type)
return ICE_SW_TUN_GTPU;
case TNL_GTPC:
return ICE_SW_TUN_GTPC;
case TNL_PFCP:
return ICE_SW_TUN_PFCP;
default:
return ICE_NON_TUN;
}
@ -219,8 +226,7 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
(fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS) {
list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
if (fltr->gtp_pdu_info_masks.pdu_type) {
@ -237,6 +243,22 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS) {
struct ice_pfcp_hdr *hdr_h, *hdr_m;
hdr_h = &list[i].h_u.pfcp_hdr;
hdr_m = &list[i].m_u.pfcp_hdr;
list[i].type = ICE_PFCP;
hdr_h->flags = fltr->pfcp_meta_keys.type;
hdr_m->flags = fltr->pfcp_meta_masks.type & 0x01;
hdr_h->seid = fltr->pfcp_meta_keys.seid;
hdr_m->seid = fltr->pfcp_meta_masks.seid;
i++;
}
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
list[i].type = ice_proto_type_from_ipv4(false);
@ -367,8 +389,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
headers = &tc_fltr->inner_headers;
inner = true;
/* PFCP is considered non-tunneled - don't swap headers. */
if (tc_fltr->tunnel_type != TNL_PFCP) {
headers = &tc_fltr->inner_headers;
inner = true;
}
}
if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
@ -622,6 +647,8 @@ static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
*/
if (netif_is_gtp(tunnel_dev))
return TNL_GTPU;
if (netif_is_pfcp(tunnel_dev))
return TNL_PFCP;
return TNL_LAST;
}
@ -1401,7 +1428,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
(fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
struct flow_match_enc_opts match;
flow_rule_match_enc_opts(rule, &match);
@ -1412,7 +1440,21 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
sizeof(struct gtp_pdu_session_info));
fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
fltr->flags |= ICE_TC_FLWR_FIELD_GTP_OPTS;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
fltr->tunnel_type == TNL_PFCP) {
struct flow_match_enc_opts match;
flow_rule_match_enc_opts(rule, &match);
memcpy(&fltr->pfcp_meta_keys, match.key->data,
sizeof(struct pfcp_metadata));
memcpy(&fltr->pfcp_meta_masks, match.mask->data,
sizeof(struct pfcp_metadata));
fltr->flags |= ICE_TC_FLWR_FIELD_PFCP_OPTS;
}
return 0;
@ -1473,10 +1515,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
return err;
}
/* header pointers should point to the inner headers, outer
* header were already set by ice_parse_tunnel_attr
*/
headers = &fltr->inner_headers;
/* PFCP is considered non-tunneled - don't swap headers. */
if (fltr->tunnel_type != TNL_PFCP) {
/* Header pointers should point to the inner headers,
* outer header were already set by
* ice_parse_tunnel_attr().
*/
headers = &fltr->inner_headers;
}
} else if (dissector->used_keys &
(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |


@ -4,6 +4,9 @@
#ifndef _ICE_TC_LIB_H_
#define _ICE_TC_LIB_H_
#include <linux/bits.h>
#include <net/pfcp.h>
#define ICE_TC_FLWR_FIELD_DST_MAC BIT(0)
#define ICE_TC_FLWR_FIELD_SRC_MAC BIT(1)
#define ICE_TC_FLWR_FIELD_VLAN BIT(2)
@ -22,7 +25,7 @@
#define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15)
#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
#define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18)
#define ICE_TC_FLWR_FIELD_GTP_OPTS BIT(18)
#define ICE_TC_FLWR_FIELD_CVLAN BIT(19)
#define ICE_TC_FLWR_FIELD_PPPOE_SESSID BIT(20)
#define ICE_TC_FLWR_FIELD_PPP_PROTO BIT(21)
@ -34,6 +37,7 @@
#define ICE_TC_FLWR_FIELD_VLAN_PRIO BIT(27)
#define ICE_TC_FLWR_FIELD_CVLAN_PRIO BIT(28)
#define ICE_TC_FLWR_FIELD_VLAN_TPID BIT(29)
#define ICE_TC_FLWR_FIELD_PFCP_OPTS BIT(30)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
@ -161,6 +165,8 @@ struct ice_tc_flower_fltr {
__be32 tenant_id;
struct gtp_pdu_session_info gtp_pdu_info_keys;
struct gtp_pdu_session_info gtp_pdu_info_masks;
struct pfcp_metadata pfcp_meta_keys;
struct pfcp_metadata pfcp_meta_masks;
u32 flags;
u8 tunnel_type;
struct ice_tc_flower_action action;


@ -117,7 +117,7 @@ bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b,
__be16 tun_flags);
u32 tun_type);
#endif /* CONFIG_MLX5_ESWITCH */
#endif //__MLX5_EN_TC_TUNNEL_H__


@ -587,7 +587,7 @@ bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b,
__be16 tun_flags)
u32 tun_type)
{
struct ip_tunnel_info *a_info;
struct ip_tunnel_info *b_info;
@ -596,8 +596,8 @@ bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a,
if (!mlx5e_tc_tun_encap_info_equal_generic(a, b))
return false;
a_has_opts = !!(a->ip_tun_key->tun_flags & tun_flags);
b_has_opts = !!(b->ip_tun_key->tun_flags & tun_flags);
a_has_opts = test_bit(tun_type, a->ip_tun_key->tun_flags);
b_has_opts = test_bit(tun_type, b->ip_tun_key->tun_flags);
/* keys are equal when both don't have any options attached */
if (!a_has_opts && !b_has_opts)


@ -106,12 +106,13 @@ static int mlx5e_gen_ip_tunnel_header_geneve(char buf[],
memset(geneveh, 0, sizeof(*geneveh));
geneveh->ver = MLX5E_GENEVE_VER;
geneveh->opt_len = tun_info->options_len / 4;
geneveh->oam = !!(tun_info->key.tun_flags & TUNNEL_OAM);
geneveh->critical = !!(tun_info->key.tun_flags & TUNNEL_CRIT_OPT);
geneveh->oam = test_bit(IP_TUNNEL_OAM_BIT, tun_info->key.tun_flags);
geneveh->critical = test_bit(IP_TUNNEL_CRIT_OPT_BIT,
tun_info->key.tun_flags);
mlx5e_tunnel_id_to_vni(tun_info->key.tun_id, geneveh->vni);
geneveh->proto_type = htons(ETH_P_TEB);
if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT) {
if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_info->key.tun_flags)) {
if (!geneveh->opt_len)
return -EOPNOTSUPP;
ip_tunnel_info_opts_get(geneveh->options, tun_info);
@ -188,7 +189,7 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
/* make sure that we're talking about GENEVE options */
if (enc_opts.key->dst_opt_type != TUNNEL_GENEVE_OPT) {
if (enc_opts.key->dst_opt_type != IP_TUNNEL_GENEVE_OPT_BIT) {
NL_SET_ERR_MSG_MOD(extack,
"Matching on GENEVE options: option type is not GENEVE");
netdev_warn(priv->netdev,
@ -337,7 +338,8 @@ static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv,
static bool mlx5e_tc_tun_encap_info_equal_geneve(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b)
{
return mlx5e_tc_tun_encap_info_equal_options(a, b, TUNNEL_GENEVE_OPT);
return mlx5e_tc_tun_encap_info_equal_options(a, b,
IP_TUNNEL_GENEVE_OPT_BIT);
}
struct mlx5e_tc_tunnel geneve_tunnel = {


@ -31,12 +31,16 @@ static int mlx5e_gen_ip_tunnel_header_gretap(char buf[],
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);
__be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
IP_TUNNEL_DECLARE_FLAGS(unsupp) = { };
int hdr_len;
*ip_proto = IPPROTO_GRE;
/* the HW does not calculate GRE csum or sequences */
if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
__set_bit(IP_TUNNEL_CSUM_BIT, unsupp);
__set_bit(IP_TUNNEL_SEQ_BIT, unsupp);
if (ip_tunnel_flags_intersect(tun_key->tun_flags, unsupp))
return -EOPNOTSUPP;
greh->protocol = htons(ETH_P_TEB);
@ -44,7 +48,7 @@ static int mlx5e_gen_ip_tunnel_header_gretap(char buf[],
/* GRE key */
hdr_len = mlx5e_tc_tun_calc_hlen_gretap(e);
greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
if (tun_key->tun_flags & TUNNEL_KEY) {
if (test_bit(IP_TUNNEL_KEY_BIT, tun_key->tun_flags)) {
__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
*ptr = tun_id;
}


@ -90,7 +90,7 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
const struct vxlan_metadata *md;
struct vxlanhdr *vxh;
if ((tun_key->tun_flags & TUNNEL_VXLAN_OPT) &&
if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_key->tun_flags) &&
e->tun_info->options_len != sizeof(*md))
return -EOPNOTSUPP;
vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
@ -99,7 +99,7 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
udp->dest = tun_key->tp_dst;
vxh->vx_flags = VXLAN_HF_VNI;
vxh->vx_vni = vxlan_vni_field(tun_id);
if (tun_key->tun_flags & TUNNEL_VXLAN_OPT) {
if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_key->tun_flags)) {
md = ip_tunnel_info_opts(e->tun_info);
vxlan_build_gbp_hdr(vxh, md);
}
@ -125,7 +125,7 @@ static int mlx5e_tc_tun_parse_vxlan_gbp_option(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
if (enc_opts.key->dst_opt_type != TUNNEL_VXLAN_OPT) {
if (enc_opts.key->dst_opt_type != IP_TUNNEL_VXLAN_OPT_BIT) {
NL_SET_ERR_MSG_MOD(extack, "Wrong VxLAN option type: not GBP");
return -EOPNOTSUPP;
}
@ -208,7 +208,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
static bool mlx5e_tc_tun_encap_info_equal_vxlan(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b)
{
return mlx5e_tc_tun_encap_info_equal_options(a, b, TUNNEL_VXLAN_OPT);
return mlx5e_tc_tun_encap_info_equal_options(a, b,
IP_TUNNEL_VXLAN_OPT_BIT);
}
static int mlx5e_tc_tun_get_remote_ifindex(struct net_device *mirred_dev)


@ -5464,6 +5464,7 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct tunnel_match_enc_opts enc_opts = {};
struct mlx5_rep_uplink_priv *uplink_priv;
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct mlx5e_rep_priv *uplink_rpriv;
struct metadata_dst *tun_dst;
struct tunnel_match_key key;
@ -5471,6 +5472,8 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
struct net_device *dev;
int err;
__set_bit(IP_TUNNEL_KEY_BIT, flags);
enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
tun_id = tunnel_id >> ENC_OPTS_BITS;
@ -5503,14 +5506,14 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
key.enc_ip.tos, key.enc_ip.ttl,
key.enc_tp.dst, TUNNEL_KEY,
key.enc_tp.dst, flags,
key32_to_tunnel_id(key.enc_key_id.keyid),
enc_opts.key.len);
break;
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
key.enc_ip.tos, key.enc_ip.ttl,
key.enc_tp.dst, 0, TUNNEL_KEY,
key.enc_tp.dst, 0, flags,
key32_to_tunnel_id(key.enc_key_id.keyid),
enc_opts.key.len);
break;
@ -5528,11 +5531,16 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
if (enc_opts.key.len)
if (enc_opts.key.len) {
ip_tunnel_flags_zero(flags);
if (enc_opts.key.dst_opt_type)
__set_bit(enc_opts.key.dst_opt_type, flags);
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
enc_opts.key.data,
enc_opts.key.len,
enc_opts.key.dst_opt_type);
flags);
}
skb_dst_set(skb, (struct dst_entry *)tun_dst);
dev = dev_get_by_index(&init_net, key.filter_ifindex);


@ -8,7 +8,7 @@
#include "spectrum_ipip.h"
#include "reg.h"
struct ip_tunnel_parm
struct ip_tunnel_parm_kern
mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev)
{
struct ip_tunnel *tun = netdev_priv(ol_dev);
@ -24,27 +24,29 @@ mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev)
return tun->parms;
}
static bool mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm *parms)
static bool
mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm_kern *parms)
{
return !!(parms->i_flags & TUNNEL_KEY);
return test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags);
}
static bool mlxsw_sp_ipip_parms6_has_ikey(const struct __ip6_tnl_parm *parms)
{
return !!(parms->i_flags & TUNNEL_KEY);
return test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags);
}
static bool mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm *parms)
static bool
mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm_kern *parms)
{
return !!(parms->o_flags & TUNNEL_KEY);
return test_bit(IP_TUNNEL_KEY_BIT, parms->o_flags);
}
static bool mlxsw_sp_ipip_parms6_has_okey(const struct __ip6_tnl_parm *parms)
{
return !!(parms->o_flags & TUNNEL_KEY);
return test_bit(IP_TUNNEL_KEY_BIT, parms->o_flags);
}
static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm *parms)
static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm_kern *parms)
{
return mlxsw_sp_ipip_parms4_has_ikey(parms) ?
be32_to_cpu(parms->i_key) : 0;
@ -56,7 +58,7 @@ static u32 mlxsw_sp_ipip_parms6_ikey(const struct __ip6_tnl_parm *parms)
be32_to_cpu(parms->i_key) : 0;
}
static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm *parms)
static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm_kern *parms)
{
return mlxsw_sp_ipip_parms4_has_okey(parms) ?
be32_to_cpu(parms->o_key) : 0;
@ -69,7 +71,7 @@ static u32 mlxsw_sp_ipip_parms6_okey(const struct __ip6_tnl_parm *parms)
}
static union mlxsw_sp_l3addr
mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm *parms)
mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm_kern *parms)
{
return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.saddr };
}
@ -81,7 +83,7 @@ mlxsw_sp_ipip_parms6_saddr(const struct __ip6_tnl_parm *parms)
}
static union mlxsw_sp_l3addr
mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm *parms)
mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm_kern *parms)
{
return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.daddr };
}
@ -96,7 +98,7 @@ union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
const struct net_device *ol_dev)
{
struct ip_tunnel_parm parms4;
struct ip_tunnel_parm_kern parms4;
struct __ip6_tnl_parm parms6;
switch (proto) {
@ -115,7 +117,9 @@ mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
{
struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
struct ip_tunnel_parm_kern parms4;
parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
return mlxsw_sp_ipip_parms4_daddr(&parms4).addr4;
}
@ -124,7 +128,7 @@ static union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
const struct net_device *ol_dev)
{
struct ip_tunnel_parm parms4;
struct ip_tunnel_parm_kern parms4;
struct __ip6_tnl_parm parms6;
switch (proto) {
@ -150,7 +154,7 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr)
static struct mlxsw_sp_ipip_parms
mlxsw_sp_ipip_netdev_parms_init_gre4(const struct net_device *ol_dev)
{
struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
struct ip_tunnel_parm_kern parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
return (struct mlxsw_sp_ipip_parms) {
.proto = MLXSW_SP_L3_PROTO_IPV4,
@ -187,8 +191,8 @@ mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp,
{
u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb);
struct ip_tunnel_parm_kern parms;
char rtdp_pl[MLXSW_REG_RTDP_LEN];
struct ip_tunnel_parm parms;
unsigned int type_check;
bool has_ikey;
u32 daddr4;
@ -238,12 +242,15 @@ static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
{
struct ip_tunnel *tunnel = netdev_priv(ol_dev);
__be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
bool inherit_ttl = tunnel->parms.iph.ttl == 0;
bool inherit_tos = tunnel->parms.iph.tos & 0x1;
IP_TUNNEL_DECLARE_FLAGS(okflags) = { };
return (tunnel->parms.i_flags & ~okflags) == 0 &&
(tunnel->parms.o_flags & ~okflags) == 0 &&
/* We can't offload any other features. */
__set_bit(IP_TUNNEL_KEY_BIT, okflags);
return ip_tunnel_flags_subset(tunnel->parms.i_flags, okflags) &&
ip_tunnel_flags_subset(tunnel->parms.o_flags, okflags) &&
inherit_ttl && inherit_tos &&
mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV4, ol_dev);
}
@ -252,7 +259,7 @@ static struct mlxsw_sp_rif_ipip_lb_config
mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
{
struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
struct ip_tunnel_parm_kern parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(&parms) ?
@ -439,10 +446,13 @@ static bool mlxsw_sp_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(ol_dev);
bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
bool inherit_ttl = tparm.hop_limit == 0;
__be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
IP_TUNNEL_DECLARE_FLAGS(okflags) = { };
return (tparm.i_flags & ~okflags) == 0 &&
(tparm.o_flags & ~okflags) == 0 &&
/* We can't offload any other features. */
__set_bit(IP_TUNNEL_KEY_BIT, okflags);
return ip_tunnel_flags_subset(tparm.i_flags, okflags) &&
ip_tunnel_flags_subset(tparm.o_flags, okflags) &&
inherit_ttl && inherit_tos &&
mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV6, ol_dev);
}


@ -9,7 +9,7 @@
#include <linux/if_tunnel.h>
#include <net/ip6_tunnel.h>
struct ip_tunnel_parm
struct ip_tunnel_parm_kern
mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev);
struct __ip6_tnl_parm
mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev);


@ -413,8 +413,8 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
__be32 *saddrp, __be32 *daddrp)
{
struct ip_tunnel *tun = netdev_priv(to_dev);
struct ip_tunnel_parm_kern parms;
struct net_device *dev = NULL;
struct ip_tunnel_parm parms;
struct rtable *rt = NULL;
struct flowi4 fl4;
@ -451,7 +451,7 @@ mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev,
struct mlxsw_sp_span_parms *sparmsp)
{
struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
struct ip_tunnel_parm_kern tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
bool inherit_tos = tparm.iph.tos & 0x1;
@ -461,7 +461,8 @@ mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
if (!(to_dev->flags & IFF_UP) ||
/* Reject tunnels with GRE keys, checksums, etc. */
tparm.i_flags || tparm.o_flags ||
!ip_tunnel_flags_empty(tparm.i_flags) ||
!ip_tunnel_flags_empty(tparm.o_flags) ||
/* Require a fixed TTL and a TOS copied from the mirrored packet. */
inherit_ttl || !inherit_tos ||
/* A destination address may not be "any". */
@ -565,7 +566,8 @@ mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp,
if (!(to_dev->flags & IFF_UP) ||
/* Reject tunnels with GRE keys, checksums, etc. */
tparm.i_flags || tparm.o_flags ||
!ip_tunnel_flags_empty(tparm.i_flags) ||
!ip_tunnel_flags_empty(tparm.o_flags) ||
/* Require a fixed TTL and a TOS copied from the mirrored packet. */
inherit_ttl || !inherit_tos ||
/* A destination address may not be "any". */


@ -396,6 +396,17 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
return 0;
}
#define NFP_FL_CHECK(flag) ({ \
IP_TUNNEL_DECLARE_FLAGS(__check) = { }; \
__be16 __res; \
\
__set_bit(IP_TUNNEL_##flag##_BIT, __check); \
__res = ip_tunnel_flags_to_be16(__check); \
\
BUILD_BUG_ON(__builtin_constant_p(__res) && \
NFP_FL_TUNNEL_##flag != __res); \
})
static int
nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
const struct flow_action_entry *act,
@ -410,6 +421,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
__be16 tun_flags;
if (!IS_ENABLED(CONFIG_IPV6) && ipv6)
return -EOPNOTSUPP;
@ -417,9 +429,10 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN))
return -EOPNOTSUPP;
BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
NFP_FL_CHECK(CSUM);
NFP_FL_CHECK(KEY);
NFP_FL_CHECK(GENEVE_OPT);
if (ip_tun->options_len &&
(tun_type != NFP_FL_TUNNEL_GENEVE ||
!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
@ -427,7 +440,9 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
return -EOPNOTSUPP;
}
if (ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
tun_flags = ip_tunnel_flags_to_be16(ip_tun->key.tun_flags);
if (!ip_tunnel_flags_is_be16_compat(ip_tun->key.tun_flags) ||
(tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS)) {
NL_SET_ERR_MSG_MOD(extack,
"unsupported offload: loaded firmware does not support tunnel flag offload");
return -EOPNOTSUPP;
@ -442,7 +457,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
if (ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY)
if (tun_flags & NFP_FL_TUNNEL_KEY)
set_tun->tun_id = ip_tun->key.tun_id;
if (ip_tun->key.ttl) {
@ -486,7 +501,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
}
set_tun->tos = ip_tun->key.tos;
set_tun->tun_flags = ip_tun->key.tun_flags;
set_tun->tun_flags = tun_flags;
if (tun_type == NFP_FL_TUNNEL_GENEVE) {
set_tun->tun_proto = htons(ETH_P_TEB);


@ -225,10 +225,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
void *oiph;
if (ip_tunnel_collect_metadata() || gs->collect_md) {
__be16 flags;
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
flags = TUNNEL_KEY | (gnvh->oam ? TUNNEL_OAM : 0) |
(gnvh->critical ? TUNNEL_CRIT_OPT : 0);
__set_bit(IP_TUNNEL_KEY_BIT, flags);
__assign_bit(IP_TUNNEL_OAM_BIT, flags, gnvh->oam);
__assign_bit(IP_TUNNEL_CRIT_OPT_BIT, flags, gnvh->critical);
tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
vni_to_tunnel_id(gnvh->vni),
@ -238,9 +239,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
goto drop;
}
/* Update tunnel dst according to Geneve options. */
ip_tunnel_flags_zero(flags);
__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, flags);
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
gnvh->options, gnvh->opt_len * 4,
TUNNEL_GENEVE_OPT);
flags);
} else {
/* Drop packets w/ critical options,
* since we don't support any...
@ -745,14 +748,15 @@ static void geneve_build_header(struct genevehdr *geneveh,
{
geneveh->ver = GENEVE_VER;
geneveh->opt_len = info->options_len / 4;
geneveh->oam = !!(info->key.tun_flags & TUNNEL_OAM);
geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT);
geneveh->oam = test_bit(IP_TUNNEL_OAM_BIT, info->key.tun_flags);
geneveh->critical = test_bit(IP_TUNNEL_CRIT_OPT_BIT,
info->key.tun_flags);
geneveh->rsvd1 = 0;
tunnel_id_to_vni(info->key.tun_id, geneveh->vni);
geneveh->proto_type = inner_proto;
geneveh->rsvd2 = 0;
if (info->key.tun_flags & TUNNEL_GENEVE_OPT)
if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags))
ip_tunnel_info_opts_get(geneveh->options, info);
}
@ -761,7 +765,7 @@ static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
bool xnet, int ip_hdr_len,
bool inner_proto_inherit)
{
bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
struct genevehdr *gnvh;
__be16 inner_proto;
int min_headroom;
@ -878,7 +882,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (geneve->cfg.collect_md) {
ttl = key->ttl;
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ?
htons(IP_DF) : 0;
} else {
if (geneve->cfg.ttl_inherit)
ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
@ -910,7 +915,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, saddr, info->key.u.ipv4.dst,
tos, ttl, df, sport, geneve->cfg.info.key.tp_dst,
!net_eq(geneve->net, dev_net(geneve->dev)),
!(info->key.tun_flags & TUNNEL_CSUM));
!test_bit(IP_TUNNEL_CSUM_BIT,
info->key.tun_flags));
return 0;
}
@ -998,7 +1004,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
&saddr, &key->u.ipv6.dst, prio, ttl,
info->key.label, sport, geneve->cfg.info.key.tp_dst,
!(info->key.tun_flags & TUNNEL_CSUM));
!test_bit(IP_TUNNEL_CSUM_BIT,
info->key.tun_flags));
return 0;
}
#endif
@ -1297,7 +1304,8 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
static bool is_tnl_info_zero(const struct ip_tunnel_info *info)
{
return !(info->key.tun_id || info->key.tun_flags || info->key.tos ||
return !(info->key.tun_id || info->key.tos ||
!ip_tunnel_flags_empty(info->key.tun_flags) ||
info->key.ttl || info->key.label || info->key.tp_src ||
memchr_inv(&info->key.u, 0, sizeof(info->key.u)));
}
@ -1435,7 +1443,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
"Remote IPv6 address cannot be Multicast");
return -EINVAL;
}
info->key.tun_flags |= TUNNEL_CSUM;
__set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
cfg->use_udp6_rx_checksums = true;
#else
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6],
@ -1510,7 +1518,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
info->key.tun_flags |= TUNNEL_CSUM;
__set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) {
@ -1520,7 +1528,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
info->key.tun_flags &= ~TUNNEL_CSUM;
__clear_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
#else
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX],
"IPv6 support not enabled in the kernel");
@ -1753,7 +1761,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
info->key.u.ipv4.dst))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
!!(info->key.tun_flags & TUNNEL_CSUM)))
test_bit(IP_TUNNEL_CSUM_BIT,
info->key.tun_flags)))
goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
@ -1762,7 +1771,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
&info->key.u.ipv6.dst))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
!(info->key.tun_flags & TUNNEL_CSUM)))
!test_bit(IP_TUNNEL_CSUM_BIT,
info->key.tun_flags)))
goto nla_put_failure;
#endif
}

drivers/net/pfcp.c (new file, +302 lines)

@ -0,0 +1,302 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PFCP according to 3GPP TS 29.244
*
* Copyright (C) 2022, Intel Corporation.
*/
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/pfcp.h>
struct pfcp_dev {
struct list_head list;
struct socket *sock;
struct net_device *dev;
struct net *net;
struct gro_cells gro_cells;
};
static unsigned int pfcp_net_id __read_mostly;
struct pfcp_net {
struct list_head pfcp_dev_list;
};
static void
pfcp_session_recv(struct pfcp_dev *pfcp, struct sk_buff *skb,
struct pfcp_metadata *md)
{
struct pfcphdr_session *unparsed = pfcp_hdr_session(skb);
md->seid = unparsed->seid;
md->type = PFCP_TYPE_SESSION;
}
static void
pfcp_node_recv(struct pfcp_dev *pfcp, struct sk_buff *skb,
struct pfcp_metadata *md)
{
md->type = PFCP_TYPE_NODE;
}
static int pfcp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct metadata_dst *tun_dst;
struct pfcp_metadata *md;
struct pfcphdr *unparsed;
struct pfcp_dev *pfcp;
if (unlikely(!pskb_may_pull(skb, PFCP_HLEN)))
goto drop;
pfcp = rcu_dereference_sk_user_data(sk);
if (unlikely(!pfcp))
goto drop;
unparsed = pfcp_hdr(skb);
ip_tunnel_flags_zero(flags);
tun_dst = udp_tun_rx_dst(skb, sk->sk_family, flags, 0,
sizeof(*md));
if (unlikely(!tun_dst))
goto drop;
md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
if (unlikely(!md))
goto drop;
if (unparsed->flags & PFCP_SEID_FLAG)
pfcp_session_recv(pfcp, skb, md);
else
pfcp_node_recv(pfcp, skb, md);
__set_bit(IP_TUNNEL_PFCP_OPT_BIT, flags);
ip_tunnel_info_opts_set(&tun_dst->u.tun_info, md, sizeof(*md),
flags);
if (unlikely(iptunnel_pull_header(skb, PFCP_HLEN, skb->protocol,
!net_eq(sock_net(sk),
dev_net(pfcp->dev)))))
goto drop;
skb_dst_set(skb, (struct dst_entry *)tun_dst);
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
skb->dev = pfcp->dev;
gro_cells_receive(&pfcp->gro_cells, skb);
return 0;
drop:
kfree_skb(skb);
return 0;
}
static void pfcp_del_sock(struct pfcp_dev *pfcp)
{
udp_tunnel_sock_release(pfcp->sock);
pfcp->sock = NULL;
}
static void pfcp_dev_uninit(struct net_device *dev)
{
struct pfcp_dev *pfcp = netdev_priv(dev);
gro_cells_destroy(&pfcp->gro_cells);
pfcp_del_sock(pfcp);
}
static int pfcp_dev_init(struct net_device *dev)
{
struct pfcp_dev *pfcp = netdev_priv(dev);
pfcp->dev = dev;
return gro_cells_init(&pfcp->gro_cells, dev);
}
static const struct net_device_ops pfcp_netdev_ops = {
.ndo_init = pfcp_dev_init,
.ndo_uninit = pfcp_dev_uninit,
.ndo_get_stats64 = dev_get_tstats64,
};
static const struct device_type pfcp_type = {
.name = "pfcp",
};
static void pfcp_link_setup(struct net_device *dev)
{
dev->netdev_ops = &pfcp_netdev_ops;
dev->needs_free_netdev = true;
SET_NETDEV_DEVTYPE(dev, &pfcp_type);
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->type = ARPHRD_NONE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->priv_flags |= IFF_NO_QUEUE;
netif_keep_dst(dev);
}
static struct socket *pfcp_create_sock(struct pfcp_dev *pfcp)
{
struct udp_tunnel_sock_cfg tuncfg = {};
struct udp_port_cfg udp_conf = {
.local_ip.s_addr = htonl(INADDR_ANY),
.family = AF_INET,
};
struct net *net = pfcp->net;
struct socket *sock;
int err;
udp_conf.local_udp_port = htons(PFCP_PORT);
err = udp_sock_create(net, &udp_conf, &sock);
if (err)
return ERR_PTR(err);
tuncfg.sk_user_data = pfcp;
tuncfg.encap_rcv = pfcp_encap_recv;
tuncfg.encap_type = 1;
setup_udp_tunnel_sock(net, sock, &tuncfg);
return sock;
}
static int pfcp_add_sock(struct pfcp_dev *pfcp)
{
pfcp->sock = pfcp_create_sock(pfcp);
return PTR_ERR_OR_ZERO(pfcp->sock);
}
static int pfcp_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct pfcp_dev *pfcp = netdev_priv(dev);
struct pfcp_net *pn;
int err;
pfcp->net = net;
err = pfcp_add_sock(pfcp);
if (err) {
netdev_dbg(dev, "failed to add pfcp socket %d\n", err);
goto exit_err;
}
err = register_netdevice(dev);
if (err) {
netdev_dbg(dev, "failed to register pfcp netdev %d\n", err);
goto exit_del_pfcp_sock;
}
pn = net_generic(dev_net(dev), pfcp_net_id);
list_add_rcu(&pfcp->list, &pn->pfcp_dev_list);
netdev_dbg(dev, "registered new PFCP interface\n");
return 0;
exit_del_pfcp_sock:
pfcp_del_sock(pfcp);
exit_err:
pfcp->net = NULL;
return err;
}
static void pfcp_dellink(struct net_device *dev, struct list_head *head)
{
struct pfcp_dev *pfcp = netdev_priv(dev);
list_del_rcu(&pfcp->list);
unregister_netdevice_queue(dev, head);
}
static struct rtnl_link_ops pfcp_link_ops __read_mostly = {
.kind = "pfcp",
.priv_size = sizeof(struct pfcp_dev),
.setup = pfcp_link_setup,
.newlink = pfcp_newlink,
.dellink = pfcp_dellink,
};
static int __net_init pfcp_net_init(struct net *net)
{
struct pfcp_net *pn = net_generic(net, pfcp_net_id);
INIT_LIST_HEAD(&pn->pfcp_dev_list);
return 0;
}
static void __net_exit pfcp_net_exit(struct net *net)
{
struct pfcp_net *pn = net_generic(net, pfcp_net_id);
struct pfcp_dev *pfcp;
LIST_HEAD(list);
rtnl_lock();
list_for_each_entry(pfcp, &pn->pfcp_dev_list, list)
pfcp_dellink(pfcp->dev, &list);
unregister_netdevice_many(&list);
rtnl_unlock();
}
static struct pernet_operations pfcp_net_ops = {
.init = pfcp_net_init,
.exit = pfcp_net_exit,
.id = &pfcp_net_id,
.size = sizeof(struct pfcp_net),
};
static int __init pfcp_init(void)
{
int err;
err = register_pernet_subsys(&pfcp_net_ops);
if (err)
goto exit_err;
err = rtnl_link_register(&pfcp_link_ops);
if (err)
goto exit_unregister_subsys;
return 0;
exit_unregister_subsys:
unregister_pernet_subsys(&pfcp_net_ops);
exit_err:
pr_err("loading PFCP module failed: err %d\n", err);
return err;
}
late_initcall(pfcp_init);
static void __exit pfcp_exit(void)
{
rtnl_link_unregister(&pfcp_link_ops);
unregister_pernet_subsys(&pfcp_net_ops);
pr_info("PFCP module unloaded\n");
}
module_exit(pfcp_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Wojciech Drewek <wojciech.drewek@intel.com>");
MODULE_DESCRIPTION("Interface driver for PFCP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("pfcp");


@ -1584,7 +1584,8 @@ static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
tun_dst = (struct metadata_dst *)skb_dst(skb);
if (tun_dst) {
tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
__set_bit(IP_TUNNEL_VXLAN_OPT_BIT,
tun_dst->u.tun_info.key.tun_flags);
tun_dst->u.tun_info.options_len = sizeof(*md);
}
if (gbp->dont_learn)
@ -1716,9 +1717,11 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
if (vxlan_collect_metadata(vs)) {
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct metadata_dst *tun_dst;
tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
__set_bit(IP_TUNNEL_KEY_BIT, flags);
tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), flags,
key32_to_tunnel_id(vni), sizeof(*md));
if (!tun_dst)
@ -2403,14 +2406,14 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
vni = tunnel_id_to_key32(info->key.tun_id);
ifindex = 0;
dst_cache = &info->dst_cache;
if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags)) {
if (info->options_len < sizeof(*md))
goto drop;
md = ip_tunnel_info_opts(info);
}
ttl = info->key.ttl;
tos = info->key.tos;
udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
}
src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
vxlan->cfg.port_max, true);
@ -2451,7 +2454,8 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
old_iph->frag_off & htons(IP_DF)))
df = htons(IP_DF);
}
} else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
} else if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT,
info->key.tun_flags)) {
df = htons(IP_DF);
}


@ -16,20 +16,21 @@ struct idset {
unsigned long bitmap[];
};
static inline unsigned long bitmap_size(int num_ssid, int num_id)
static inline unsigned long idset_bitmap_size(int num_ssid, int num_id)
{
return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long);
return bitmap_size(size_mul(num_ssid, num_id));
}
static struct idset *idset_new(int num_ssid, int num_id)
{
struct idset *set;
set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id));
set = vmalloc(sizeof(struct idset) +
idset_bitmap_size(num_ssid, num_id));
if (set) {
set->num_ssid = num_ssid;
set->num_id = num_id;
memset(set->bitmap, 0, bitmap_size(num_ssid, num_id));
memset(set->bitmap, 0, idset_bitmap_size(num_ssid, num_id));
}
return set;
}
@ -41,7 +42,8 @@ void idset_free(struct idset *set)
void idset_fill(struct idset *set)
{
memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
memset(set->bitmap, 0xff,
idset_bitmap_size(set->num_ssid, set->num_id));
}
static inline void idset_add(struct idset *set, int ssid, int id)


@ -1911,9 +1911,9 @@ static inline void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
ctl->free_space -= bytes;
}
static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info, u64 offset,
u64 bytes)
static void btrfs_bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info, u64 offset,
u64 bytes)
{
unsigned long start, count, end;
int extent_delta = 1;
@ -2249,7 +2249,7 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
bytes_to_set = min(end - offset, bytes);
bitmap_set_bits(ctl, info, offset, bytes_to_set);
btrfs_bitmap_set_bits(ctl, info, offset, bytes_to_set);
return bytes_to_set;


@ -654,7 +654,7 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
wnd->total_zeroes = nbits;
wnd->extent_max = MINUS_ONE_T;
wnd->zone_bit = wnd->zone_end = 0;
wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits));
wnd->nwnd = bytes_to_block(sb, ntfs3_bitmap_size(nbits));
wnd->bits_last = nbits & (wbits - 1);
if (!wnd->bits_last)
wnd->bits_last = wbits;
@ -1347,7 +1347,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
return -EINVAL;
/* Align to 8 byte boundary. */
new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
new_wnd = bytes_to_block(sb, ntfs3_bitmap_size(new_bits));
new_last = new_bits & (wbits - 1);
if (!new_last)
new_last = wbits;


@ -522,7 +522,7 @@ static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
ni->mi.dirty = true;
/* Step 2: Resize $MFT::BITMAP. */
new_bitmap_bytes = bitmap_size(new_mft_total);
new_bitmap_bytes = ntfs3_bitmap_size(new_mft_total);
err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
new_bitmap_bytes, &new_bitmap_bytes, true, NULL);


@ -1456,8 +1456,8 @@ static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
alloc->nres.valid_size = alloc->nres.data_size = cpu_to_le64(data_size);
err = ni_insert_resident(ni, bitmap_size(1), ATTR_BITMAP, in->name,
in->name_len, &bitmap, NULL, NULL);
err = ni_insert_resident(ni, ntfs3_bitmap_size(1), ATTR_BITMAP,
in->name, in->name_len, &bitmap, NULL, NULL);
if (err)
goto out2;
@ -1518,8 +1518,9 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
if (bmp) {
/* Increase bitmap. */
err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
&indx->bitmap_run, bitmap_size(bit + 1),
NULL, true, NULL);
&indx->bitmap_run,
ntfs3_bitmap_size(bit + 1), NULL, true,
NULL);
if (err)
goto out1;
}
@ -2092,7 +2093,7 @@ static int indx_shrink(struct ntfs_index *indx, struct ntfs_inode *ni,
if (in->name == I30_NAME)
i_size_write(&ni->vfs_inode, new_data);
bpb = bitmap_size(bit);
bpb = ntfs3_bitmap_size(bit);
if (bpb * 8 == nbits)
return 0;


@ -966,9 +966,9 @@ static inline bool run_is_empty(struct runs_tree *run)
}
/* NTFS uses quad aligned bitmaps. */
static inline size_t bitmap_size(size_t bits)
static inline size_t ntfs3_bitmap_size(size_t bits)
{
return ALIGN((bits + 7) >> 3, 8);
return BITS_TO_U64(bits) * sizeof(u64);
}
#define _100ns2seconds 10000000


@ -1341,7 +1341,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
/* Check bitmap boundary. */
tt = sbi->used.bitmap.nbits;
if (inode->i_size < bitmap_size(tt)) {
if (inode->i_size < ntfs3_bitmap_size(tt)) {
ntfs_err(sb, "$Bitmap is corrupted.");
err = -EINVAL;
goto put_inode_out;


@ -83,6 +83,10 @@ struct device;
* bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst
* bitmap_get_value8(map, start) Get 8bit value from map at start
* bitmap_set_value8(map, value, start) Set 8bit value to map at start
* bitmap_read(map, start, nbits) Read an nbits-sized value from
* map at start
* bitmap_write(map, value, start, nbits) Write an nbits-sized value to
* map at start
*
* Note, bitmap_zero() and bitmap_fill() operate over the region of
* unsigned longs, that is, bits behind bitmap till the unsigned long
@ -222,9 +226,11 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig,
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
unsigned int len = bitmap_size(nbits);
if (small_const_nbits(nbits))
*dst = 0;
@ -234,7 +240,7 @@ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
unsigned int len = bitmap_size(nbits);
if (small_const_nbits(nbits))
*dst = ~0UL;
@ -245,7 +251,7 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
unsigned int nbits)
{
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
unsigned int len = bitmap_size(nbits);
if (small_const_nbits(nbits))
*dst = *src;
@ -722,38 +728,83 @@ static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
}
/**
* bitmap_get_value8 - get an 8-bit value within a memory region
* bitmap_read - read a value of n-bits from the memory region
* @map: address to the bitmap memory region
* @start: bit offset of the 8-bit value; must be a multiple of 8
* @start: bit offset of the n-bit value
* @nbits: size of value in bits, nonzero, up to BITS_PER_LONG
*
* Returns the 8-bit value located at the @start bit offset within the @src
* memory region.
* Returns: value of @nbits bits located at the @start bit offset within the
* @map memory region. For @nbits = 0 and @nbits > BITS_PER_LONG the return
* value is undefined.
*/
static inline unsigned long bitmap_get_value8(const unsigned long *map,
unsigned long start)
static inline unsigned long bitmap_read(const unsigned long *map,
unsigned long start,
unsigned long nbits)
{
const size_t index = BIT_WORD(start);
const unsigned long offset = start % BITS_PER_LONG;
size_t index = BIT_WORD(start);
unsigned long offset = start % BITS_PER_LONG;
unsigned long space = BITS_PER_LONG - offset;
unsigned long value_low, value_high;
return (map[index] >> offset) & 0xFF;
if (unlikely(!nbits || nbits > BITS_PER_LONG))
return 0;
if (space >= nbits)
return (map[index] >> offset) & BITMAP_LAST_WORD_MASK(nbits);
value_low = map[index] & BITMAP_FIRST_WORD_MASK(start);
value_high = map[index + 1] & BITMAP_LAST_WORD_MASK(start + nbits);
return (value_low >> offset) | (value_high << space);
}
/**
* bitmap_set_value8 - set an 8-bit value within a memory region
* bitmap_write - write n-bit value within a memory region
* @map: address to the bitmap memory region
* @value: the 8-bit value; values wider than 8 bits may clobber bitmap
* @start: bit offset of the 8-bit value; must be a multiple of 8
* @value: value to write, clamped to nbits
* @start: bit offset of the n-bit value
* @nbits: size of value in bits, nonzero, up to BITS_PER_LONG.
*
* bitmap_write() behaves as-if implemented as @nbits calls of __assign_bit(),
* i.e. bits beyond @nbits are ignored:
*
* for (bit = 0; bit < nbits; bit++)
* __assign_bit(start + bit, bitmap, val & BIT(bit));
*
* For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed.
*/
static inline void bitmap_set_value8(unsigned long *map, unsigned long value,
unsigned long start)
static inline void bitmap_write(unsigned long *map, unsigned long value,
unsigned long start, unsigned long nbits)
{
const size_t index = BIT_WORD(start);
const unsigned long offset = start % BITS_PER_LONG;
size_t index;
unsigned long offset;
unsigned long space;
unsigned long mask;
bool fit;
map[index] &= ~(0xFFUL << offset);
if (unlikely(!nbits || nbits > BITS_PER_LONG))
return;
mask = BITMAP_LAST_WORD_MASK(nbits);
value &= mask;
offset = start % BITS_PER_LONG;
space = BITS_PER_LONG - offset;
fit = space >= nbits;
index = BIT_WORD(start);
map[index] &= (fit ? (~(mask << offset)) : ~BITMAP_FIRST_WORD_MASK(start));
map[index] |= value << offset;
if (fit)
return;
map[index + 1] &= BITMAP_FIRST_WORD_MASK(start + nbits);
map[index + 1] |= (value >> space);
}
#define bitmap_get_value8(map, start) \
bitmap_read(map, start, BITS_PER_BYTE)
#define bitmap_set_value8(map, value, start) \
bitmap_write(map, value, start, BITS_PER_BYTE)
#endif /* __ASSEMBLY__ */
#endif /* __LINUX_BITMAP_H */
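
The kernel-doc above defines bitmap_write() by its as-if loop of
__assign_bit() calls, and bitmap_read() as its inverse. The standalone
userspace sketch below (reference code written for this page, not part
of the patch) demonstrates those semantics, including a value that
straddles a word boundary:

  #include <stdio.h>
  #include <limits.h>

  #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

  /* Reference semantics of bitmap_write(): nbits calls of __assign_bit(). */
  static void bitmap_write_ref(unsigned long *map, unsigned long value,
                               unsigned long start, unsigned long nbits)
  {
          for (unsigned long bit = 0; bit < nbits; bit++) {
                  unsigned long idx = (start + bit) / BITS_PER_LONG;
                  unsigned long off = (start + bit) % BITS_PER_LONG;

                  if (value & (1UL << bit))
                          map[idx] |= 1UL << off;    /* __set_bit() */
                  else
                          map[idx] &= ~(1UL << off); /* __clear_bit() */
          }
  }

  /* Reference semantics of bitmap_read(): nbits calls of test_bit(). */
  static unsigned long bitmap_read_ref(const unsigned long *map,
                                       unsigned long start,
                                       unsigned long nbits)
  {
          unsigned long value = 0;

          for (unsigned long bit = 0; bit < nbits; bit++) {
                  unsigned long idx = (start + bit) / BITS_PER_LONG;
                  unsigned long off = (start + bit) % BITS_PER_LONG;

                  if (map[idx] & (1UL << off))
                          value |= 1UL << bit;
          }
          return value;
  }

  int main(void)
  {
          unsigned long map[2] = { 0 };

          /* A 7-bit value written at offset 60 straddles the 64-bit word
           * boundary and must read back unchanged.
           */
          bitmap_write_ref(map, 0x5b, 60, 7);
          printf("0x%lx\n", bitmap_read_ref(map, 60, 7)); /* prints 0x5b */
          return 0;
  }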


@ -21,6 +21,8 @@
#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_BYTE)
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
@ -80,6 +82,7 @@ __check_bitop_pr(__test_and_set_bit);
__check_bitop_pr(__test_and_clear_bit);
__check_bitop_pr(__test_and_change_bit);
__check_bitop_pr(test_bit);
__check_bitop_pr(test_bit_acquire);
#undef __check_bitop_pr
@ -272,23 +275,11 @@ static inline unsigned long fns(unsigned long word, unsigned int n)
* @addr: the address to start counting from
* @value: the value to assign
*/
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
bool value)
{
if (value)
set_bit(nr, addr);
else
clear_bit(nr, addr);
}
#define assign_bit(nr, addr, value) \
((value) ? set_bit((nr), (addr)) : clear_bit((nr), (addr)))
static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
bool value)
{
if (value)
__set_bit(nr, addr);
else
__clear_bit(nr, addr);
}
#define __assign_bit(nr, addr, value) \
((value) ? __set_bit((nr), (addr)) : __clear_bit((nr), (addr)))
/**
* __ptr_set_bit - Set bit in a pointer's value


@ -853,7 +853,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
*/
static inline unsigned int cpumask_size(void)
{
return BITS_TO_LONGS(large_cpumask_bits) * sizeof(long);
return bitmap_size(large_cpumask_bits);
}
/*


@ -43,29 +43,10 @@ static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1,
return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
static inline void linkmode_set_bit(int nr, volatile unsigned long *addr)
{
__set_bit(nr, addr);
}
static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr)
{
__clear_bit(nr, addr);
}
static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr,
int set)
{
if (set)
linkmode_set_bit(nr, addr);
else
linkmode_clear_bit(nr, addr);
}
static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr)
{
return test_bit(nr, addr);
}
#define linkmode_test_bit test_bit
#define linkmode_set_bit __set_bit
#define linkmode_clear_bit __clear_bit
#define linkmode_mod_bit __assign_bit
static inline void linkmode_set_bit_array(const int *array, int array_size,
unsigned long *addr)


@ -59,7 +59,7 @@ struct ethtool_ops;
struct kernel_hwtstamp_config;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm;
struct ip_tunnel_parm_kern;
struct macsec_context;
struct macsec_ops;
struct netdev_name_node;
@ -1327,7 +1327,7 @@ struct netdev_net_notifier {
* queue id bound to an AF_XDP socket. The flags field specifies if
* only RX, only Tx, or both should be woken up using the flags
* XDP_WAKEUP_RX and XDP_WAKEUP_TX.
* int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p,
* int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm_kern *p,
* int cmd);
* Add, change, delete or get information on an IPv4 tunnel.
* struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
@ -1583,7 +1583,8 @@ struct net_device_ops {
int (*ndo_xsk_wakeup)(struct net_device *dev,
u32 queue_id, u32 flags);
int (*ndo_tunnel_ctl)(struct net_device *dev,
struct ip_tunnel_parm *p, int cmd);
struct ip_tunnel_parm_kern *p,
int cmd);
struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);
int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
struct net_device_path *path);


@ -198,7 +198,7 @@ static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
__be32 daddr,
__u8 tos, __u8 ttl,
__be16 tp_dst,
__be16 flags,
const unsigned long *flags,
__be64 tunnel_id,
int md_size)
{
@ -215,7 +215,7 @@ static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
}
static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
__be16 flags,
const unsigned long *flags,
__be64 tunnel_id,
int md_size)
{
@ -230,7 +230,7 @@ static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *sad
__u8 tos, __u8 ttl,
__be16 tp_dst,
__be32 label,
__be16 flags,
const unsigned long *flags,
__be64 tunnel_id,
int md_size)
{
@ -243,7 +243,7 @@ static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *sad
info = &tun_dst->u.tun_info;
info->mode = IP_TUNNEL_INFO_IPV6;
info->key.tun_flags = flags;
ip_tunnel_flags_copy(info->key.tun_flags, flags);
info->key.tun_id = tunnel_id;
info->key.tp_src = 0;
info->key.tp_dst = tp_dst;
@ -259,7 +259,7 @@ static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *sad
}
static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
__be16 flags,
const unsigned long *flags,
__be64 tunnel_id,
int md_size)
{


@ -97,7 +97,7 @@ struct flow_dissector_key_enc_opts {
* here but seems difficult to #include
*/
u8 len;
__be16 dst_opt_type;
u32 dst_opt_type;
};
struct flow_dissector_key_keyid {


@ -49,67 +49,61 @@ static inline bool netif_is_ip6gretap(const struct net_device *dev)
!strcmp(dev->rtnl_link_ops->kind, "ip6gretap");
}
static inline int gre_calc_hlen(__be16 o_flags)
static inline int gre_calc_hlen(const unsigned long *o_flags)
{
int addend = 4;
if (o_flags & TUNNEL_CSUM)
if (test_bit(IP_TUNNEL_CSUM_BIT, o_flags))
addend += 4;
if (o_flags & TUNNEL_KEY)
if (test_bit(IP_TUNNEL_KEY_BIT, o_flags))
addend += 4;
if (o_flags & TUNNEL_SEQ)
if (test_bit(IP_TUNNEL_SEQ_BIT, o_flags))
addend += 4;
return addend;
}
static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
static inline void gre_flags_to_tnl_flags(unsigned long *dst, __be16 flags)
{
__be16 tflags = 0;
IP_TUNNEL_DECLARE_FLAGS(res) = { };
if (flags & GRE_CSUM)
tflags |= TUNNEL_CSUM;
if (flags & GRE_ROUTING)
tflags |= TUNNEL_ROUTING;
if (flags & GRE_KEY)
tflags |= TUNNEL_KEY;
if (flags & GRE_SEQ)
tflags |= TUNNEL_SEQ;
if (flags & GRE_STRICT)
tflags |= TUNNEL_STRICT;
if (flags & GRE_REC)
tflags |= TUNNEL_REC;
if (flags & GRE_VERSION)
tflags |= TUNNEL_VERSION;
__assign_bit(IP_TUNNEL_CSUM_BIT, res, flags & GRE_CSUM);
__assign_bit(IP_TUNNEL_ROUTING_BIT, res, flags & GRE_ROUTING);
__assign_bit(IP_TUNNEL_KEY_BIT, res, flags & GRE_KEY);
__assign_bit(IP_TUNNEL_SEQ_BIT, res, flags & GRE_SEQ);
__assign_bit(IP_TUNNEL_STRICT_BIT, res, flags & GRE_STRICT);
__assign_bit(IP_TUNNEL_REC_BIT, res, flags & GRE_REC);
__assign_bit(IP_TUNNEL_VERSION_BIT, res, flags & GRE_VERSION);
return tflags;
ip_tunnel_flags_copy(dst, res);
}
static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags)
static inline __be16 gre_tnl_flags_to_gre_flags(const unsigned long *tflags)
{
__be16 flags = 0;
if (tflags & TUNNEL_CSUM)
if (test_bit(IP_TUNNEL_CSUM_BIT, tflags))
flags |= GRE_CSUM;
if (tflags & TUNNEL_ROUTING)
if (test_bit(IP_TUNNEL_ROUTING_BIT, tflags))
flags |= GRE_ROUTING;
if (tflags & TUNNEL_KEY)
if (test_bit(IP_TUNNEL_KEY_BIT, tflags))
flags |= GRE_KEY;
if (tflags & TUNNEL_SEQ)
if (test_bit(IP_TUNNEL_SEQ_BIT, tflags))
flags |= GRE_SEQ;
if (tflags & TUNNEL_STRICT)
if (test_bit(IP_TUNNEL_STRICT_BIT, tflags))
flags |= GRE_STRICT;
if (tflags & TUNNEL_REC)
if (test_bit(IP_TUNNEL_REC_BIT, tflags))
flags |= GRE_REC;
if (tflags & TUNNEL_VERSION)
if (test_bit(IP_TUNNEL_VERSION_BIT, tflags))
flags |= GRE_VERSION;
return flags;
}
static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
__be16 flags, __be16 proto,
const unsigned long *flags, __be16 proto,
__be32 key, __be32 seq)
{
IP_TUNNEL_DECLARE_FLAGS(cond) = { };
struct gre_base_hdr *greh;
skb_push(skb, hdr_len);
@ -120,18 +114,22 @@ static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
greh->flags = gre_tnl_flags_to_gre_flags(flags);
greh->protocol = proto;
if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
__set_bit(IP_TUNNEL_KEY_BIT, cond);
__set_bit(IP_TUNNEL_CSUM_BIT, cond);
__set_bit(IP_TUNNEL_SEQ_BIT, cond);
if (ip_tunnel_flags_intersect(flags, cond)) {
__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
if (flags & TUNNEL_SEQ) {
if (test_bit(IP_TUNNEL_SEQ_BIT, flags)) {
*ptr = seq;
ptr--;
}
if (flags & TUNNEL_KEY) {
if (test_bit(IP_TUNNEL_KEY_BIT, flags)) {
*ptr = key;
ptr--;
}
if (flags & TUNNEL_CSUM &&
if (test_bit(IP_TUNNEL_CSUM_BIT, flags) &&
!(skb_shinfo(skb)->gso_type &
(SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
*ptr = 0;


@ -30,8 +30,8 @@ struct __ip6_tnl_parm {
struct in6_addr laddr; /* local tunnel end-point address */
struct in6_addr raddr; /* remote tunnel end-point address */
__be16 i_flags;
__be16 o_flags;
IP_TUNNEL_DECLARE_FLAGS(i_flags);
IP_TUNNEL_DECLARE_FLAGS(o_flags);
__be32 i_key;
__be32 o_key;


@ -36,6 +36,24 @@
(sizeof_field(struct ip_tunnel_key, u) - \
sizeof_field(struct ip_tunnel_key, u.ipv4))
#define __ipt_flag_op(op, ...) \
op(__VA_ARGS__, __IP_TUNNEL_FLAG_NUM)
#define IP_TUNNEL_DECLARE_FLAGS(...) \
__ipt_flag_op(DECLARE_BITMAP, __VA_ARGS__)
#define ip_tunnel_flags_zero(...) __ipt_flag_op(bitmap_zero, __VA_ARGS__)
#define ip_tunnel_flags_copy(...) __ipt_flag_op(bitmap_copy, __VA_ARGS__)
#define ip_tunnel_flags_and(...) __ipt_flag_op(bitmap_and, __VA_ARGS__)
#define ip_tunnel_flags_or(...) __ipt_flag_op(bitmap_or, __VA_ARGS__)
#define ip_tunnel_flags_empty(...) \
__ipt_flag_op(bitmap_empty, __VA_ARGS__)
#define ip_tunnel_flags_intersect(...) \
__ipt_flag_op(bitmap_intersects, __VA_ARGS__)
#define ip_tunnel_flags_subset(...) \
__ipt_flag_op(bitmap_subset, __VA_ARGS__)
struct ip_tunnel_key {
__be64 tun_id;
union {
@ -48,11 +66,11 @@ struct ip_tunnel_key {
struct in6_addr dst;
} ipv6;
} u;
__be16 tun_flags;
u8 tos; /* TOS for IPv4, TC for IPv6 */
u8 ttl; /* TTL for IPv4, HL for IPv6 */
IP_TUNNEL_DECLARE_FLAGS(tun_flags);
__be32 label; /* Flow Label for IPv6 */
u32 nhid;
u8 tos; /* TOS for IPv4, TC for IPv6 */
u8 ttl; /* TTL for IPv4, HL for IPv6 */
__be16 tp_src;
__be16 tp_dst;
__u8 flow_flags;
@ -110,6 +128,17 @@ struct ip_tunnel_prl_entry {
struct metadata_dst;
/* Kernel-side variant of ip_tunnel_parm */
struct ip_tunnel_parm_kern {
char name[IFNAMSIZ];
IP_TUNNEL_DECLARE_FLAGS(i_flags);
IP_TUNNEL_DECLARE_FLAGS(o_flags);
__be32 i_key;
__be32 o_key;
int link;
struct iphdr iph;
};
struct ip_tunnel {
struct ip_tunnel __rcu *next;
struct hlist_node hash_node;
@ -136,7 +165,7 @@ struct ip_tunnel {
struct dst_cache dst_cache;
struct ip_tunnel_parm parms;
struct ip_tunnel_parm_kern parms;
int mlink;
int encap_hlen; /* Encap header length (FOU,GUE) */
@ -157,7 +186,7 @@ struct ip_tunnel {
};
struct tnl_ptk_info {
__be16 flags;
IP_TUNNEL_DECLARE_FLAGS(flags);
__be16 proto;
__be32 key;
__be32 seq;
@ -179,11 +208,80 @@ struct ip_tunnel_net {
int type;
};
static inline void ip_tunnel_set_options_present(unsigned long *flags)
{
IP_TUNNEL_DECLARE_FLAGS(present) = { };
__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
__set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);
ip_tunnel_flags_or(flags, flags, present);
}
static inline void ip_tunnel_clear_options_present(unsigned long *flags)
{
IP_TUNNEL_DECLARE_FLAGS(present) = { };
__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
__set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);
__ipt_flag_op(bitmap_andnot, flags, flags, present);
}
static inline bool ip_tunnel_is_options_present(const unsigned long *flags)
{
IP_TUNNEL_DECLARE_FLAGS(present) = { };
__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present);
__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present);
__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present);
__set_bit(IP_TUNNEL_GTP_OPT_BIT, present);
__set_bit(IP_TUNNEL_PFCP_OPT_BIT, present);
return ip_tunnel_flags_intersect(flags, present);
}
static inline bool ip_tunnel_flags_is_be16_compat(const unsigned long *flags)
{
IP_TUNNEL_DECLARE_FLAGS(supp) = { };
bitmap_set(supp, 0, BITS_PER_TYPE(__be16));
__set_bit(IP_TUNNEL_VTI_BIT, supp);
return ip_tunnel_flags_subset(flags, supp);
}
static inline void ip_tunnel_flags_from_be16(unsigned long *dst, __be16 flags)
{
ip_tunnel_flags_zero(dst);
bitmap_write(dst, be16_to_cpu(flags), 0, BITS_PER_TYPE(__be16));
__assign_bit(IP_TUNNEL_VTI_BIT, dst, flags & VTI_ISVTI);
}
static inline __be16 ip_tunnel_flags_to_be16(const unsigned long *flags)
{
__be16 ret;
ret = cpu_to_be16(bitmap_read(flags, 0, BITS_PER_TYPE(__be16)));
if (test_bit(IP_TUNNEL_VTI_BIT, flags))
ret |= VTI_ISVTI;
return ret;
}
static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
__be32 saddr, __be32 daddr,
u8 tos, u8 ttl, __be32 label,
__be16 tp_src, __be16 tp_dst,
__be64 tun_id, __be16 tun_flags)
__be64 tun_id,
const unsigned long *tun_flags)
{
key->tun_id = tun_id;
key->u.ipv4.src = saddr;
@ -193,7 +291,7 @@ static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
key->tos = tos;
key->ttl = ttl;
key->label = label;
key->tun_flags = tun_flags;
ip_tunnel_flags_copy(key->tun_flags, tun_flags);
/* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
* the upper tunnel are used.
@ -214,12 +312,8 @@ ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
{
if (skb->mark)
return false;
if (!info)
return true;
if (info->key.tun_flags & TUNNEL_NOCACHE)
return false;
return true;
return !info || !test_bit(IP_TUNNEL_NOCACHE_BIT, info->key.tun_flags);
}
static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
@ -291,14 +385,18 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const u8 proto, int tunnel_hlen);
int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p,
int cmd);
bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp,
const void __user *data);
bool ip_tunnel_parm_to_user(void __user *data, struct ip_tunnel_parm_kern *kp);
int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *data, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
int link, __be16 flags,
int link, const unsigned long *flags,
__be32 remote, __be32 local,
__be32 key);
@ -307,16 +405,16 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p, __u32 fwmark);
struct ip_tunnel_parm_kern *p, __u32 fwmark);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p, __u32 fwmark);
struct ip_tunnel_parm_kern *p, __u32 fwmark);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
struct ip_tunnel_encap *encap);
void ip_tunnel_netlink_parms(struct nlattr *data[],
struct ip_tunnel_parm *parms);
struct ip_tunnel_parm_kern *parms);
extern const struct header_ops ip_tunnel_header_ops;
__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);
@ -514,12 +612,13 @@ static inline void ip_tunnel_info_opts_get(void *to,
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
const void *from, int len,
__be16 flags)
const unsigned long *flags)
{
info->options_len = len;
if (len > 0) {
memcpy(ip_tunnel_info_opts(info), from, len);
info->key.tun_flags |= flags;
ip_tunnel_flags_or(info->key.tun_flags, info->key.tun_flags,
flags);
}
}
@ -563,7 +662,7 @@ static inline void ip_tunnel_info_opts_get(void *to,
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
const void *from, int len,
__be16 flags)
const unsigned long *flags)
{
info->options_len = 0;
}

include/net/pfcp.h (new file, 90 lines)

@ -0,0 +1,90 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PFCP_H_
#define _PFCP_H_
#include <uapi/linux/if_ether.h>
#include <net/dst_metadata.h>
#include <linux/netdevice.h>
#include <uapi/linux/ipv6.h>
#include <net/udp_tunnel.h>
#include <uapi/linux/udp.h>
#include <uapi/linux/ip.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bits.h>
#define PFCP_PORT 8805
/* PFCP protocol header */
struct pfcphdr {
u8 flags;
u8 message_type;
__be16 message_length;
};
/* PFCP header flags */
#define PFCP_SEID_FLAG BIT(0)
#define PFCP_MP_FLAG BIT(1)
#define PFCP_VERSION_MASK GENMASK(4, 0)
#define PFCP_HLEN (sizeof(struct udphdr) + sizeof(struct pfcphdr))
/* PFCP node related messages */
struct pfcphdr_node {
u8 seq_number[3];
u8 reserved;
};
/* PFCP session related messages */
struct pfcphdr_session {
__be64 seid;
u8 seq_number[3];
#ifdef __LITTLE_ENDIAN_BITFIELD
u8 message_priority:4,
reserved:4;
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 reserved:4,
message_priority:4;
#else
#error "Please fix <asm/byteorder>"
#endif
};
struct pfcp_metadata {
u8 type;
__be64 seid;
} __packed;
enum {
PFCP_TYPE_NODE = 0,
PFCP_TYPE_SESSION = 1,
};
#define PFCP_HEADROOM (sizeof(struct iphdr) + sizeof(struct udphdr) + \
sizeof(struct pfcphdr) + sizeof(struct ethhdr))
#define PFCP6_HEADROOM (sizeof(struct ipv6hdr) + sizeof(struct udphdr) + \
sizeof(struct pfcphdr) + sizeof(struct ethhdr))
static inline struct pfcphdr *pfcp_hdr(struct sk_buff *skb)
{
return (struct pfcphdr *)(udp_hdr(skb) + 1);
}
static inline struct pfcphdr_node *pfcp_hdr_node(struct sk_buff *skb)
{
return (struct pfcphdr_node *)(pfcp_hdr(skb) + 1);
}
static inline struct pfcphdr_session *pfcp_hdr_session(struct sk_buff *skb)
{
return (struct pfcphdr_session *)(pfcp_hdr(skb) + 1);
}
static inline bool netif_is_pfcp(const struct net_device *dev)
{
return dev->rtnl_link_ops &&
!strcmp(dev->rtnl_link_ops->kind, "pfcp");
}
#endif
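
For orientation, a sketch of how the accessors above compose
(illustrative, not from the patch; the function name is invented): the S
flag in the common header decides whether a session header carrying a
SEID or a node header follows.

#include <net/pfcp.h>

/* Hypothetical receive-path helper: return the SEID of a session-related
 * message, or 0 for node-related ones. Assumes the caller has already
 * pulled PFCP_HLEN bytes and set the transport header to the UDP header.
 */
static __be64 pfcp_peek_seid(struct sk_buff *skb)
{
	struct pfcphdr *hdr = pfcp_hdr(skb);

	if (hdr->flags & PFCP_SEID_FLAG)
		return pfcp_hdr_session(skb)->seid;

	return 0;
}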


@ -179,8 +179,8 @@ struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb,
struct dst_cache *dst_cache);
struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
__be16 flags, __be64 tunnel_id,
int md_size);
const unsigned long *flags,
__be64 tunnel_id, int md_size);
#ifdef CONFIG_INET
static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)


@ -161,6 +161,14 @@ enum {
#define IFLA_VTI_MAX (__IFLA_VTI_MAX - 1)
#ifndef __KERNEL__
/* Historically, tunnel flags have been defined as __be16 and now there are
* no free bits left. It is strongly advised to switch any existing
* userspace code to the new *_BIT definitions below, as a __be16 can't
* simply be cast to a wider type on LE systems. All new flags and
* code must use *_BIT only.
*/
#define TUNNEL_CSUM __cpu_to_be16(0x01)
#define TUNNEL_ROUTING __cpu_to_be16(0x02)
#define TUNNEL_KEY __cpu_to_be16(0x04)
@ -181,5 +189,33 @@ enum {
#define TUNNEL_OPTIONS_PRESENT \
(TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT | \
TUNNEL_GTP_OPT)
#endif
enum {
IP_TUNNEL_CSUM_BIT = 0U,
IP_TUNNEL_ROUTING_BIT,
IP_TUNNEL_KEY_BIT,
IP_TUNNEL_SEQ_BIT,
IP_TUNNEL_STRICT_BIT,
IP_TUNNEL_REC_BIT,
IP_TUNNEL_VERSION_BIT,
IP_TUNNEL_NO_KEY_BIT,
IP_TUNNEL_DONT_FRAGMENT_BIT,
IP_TUNNEL_OAM_BIT,
IP_TUNNEL_CRIT_OPT_BIT,
IP_TUNNEL_GENEVE_OPT_BIT, /* OPTIONS_PRESENT */
IP_TUNNEL_VXLAN_OPT_BIT, /* OPTIONS_PRESENT */
IP_TUNNEL_NOCACHE_BIT,
IP_TUNNEL_ERSPAN_OPT_BIT, /* OPTIONS_PRESENT */
IP_TUNNEL_GTP_OPT_BIT, /* OPTIONS_PRESENT */
IP_TUNNEL_VTI_BIT,
IP_TUNNEL_SIT_ISATAP_BIT = IP_TUNNEL_VTI_BIT,
/* Flags starting from here are not available via the old UAPI */
IP_TUNNEL_PFCP_OPT_BIT, /* OPTIONS_PRESENT */
__IP_TUNNEL_FLAG_NUM,
};
#endif /* _UAPI_IF_TUNNEL_H_ */
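
The conversion helpers added to ip_tunnels.h above tie the two
representations together; a minimal round-trip sketch (the function name
is invented):

#include <net/ip_tunnels.h>

/* Hypothetical: convert a legacy __be16 flag word to the bitmap form and
 * back. Anything expressible in the old 16 bits (plus the VTI alias)
 * survives; flags from IP_TUNNEL_PFCP_OPT_BIT upwards cannot be
 * represented and make the compat check fail.
 */
static __be16 tunnel_flags_roundtrip(__be16 legacy)
{
	IP_TUNNEL_DECLARE_FLAGS(flags);

	ip_tunnel_flags_from_be16(flags, legacy);	/* zeroes @flags first */

	if (!ip_tunnel_flags_is_be16_compat(flags))
		return 0;

	return ip_tunnel_flags_to_be16(flags);
}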


@ -587,6 +587,10 @@ enum {
* TCA_FLOWER_KEY_ENC_OPT_GTP_
* attributes
*/
TCA_FLOWER_KEY_ENC_OPTS_PFCP, /* Nested
* TCA_FLOWER_KEY_ENC_OPT_PFCP_
* attributes
*/
__TCA_FLOWER_KEY_ENC_OPTS_MAX,
};
@ -636,6 +640,16 @@ enum {
#define TCA_FLOWER_KEY_ENC_OPT_GTP_MAX \
(__TCA_FLOWER_KEY_ENC_OPT_GTP_MAX - 1)
enum {
TCA_FLOWER_KEY_ENC_OPT_PFCP_UNSPEC,
TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE, /* u8 */
TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID, /* be64 */
__TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX,
};
#define TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX \
(__TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX - 1)
enum {
TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC,
TCA_FLOWER_KEY_MPLS_OPTS_LSE,


@ -1180,8 +1180,6 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
return ret;
}
#define BYTES_TO_BITS(nb) ((BITS_PER_LONG * (nb)) / sizeof(long))
/* Bitfield type needs to be parsed into a fetch function */
static int __parse_bitfield_probe_arg(const char *bf,
const struct fetch_type *t,


@ -6,8 +6,6 @@
#include <linux/prime_numbers.h>
#include <linux/slab.h>
#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long))
struct primes {
struct rcu_head rcu;
unsigned long last, sz;


@ -60,18 +60,17 @@ static const unsigned long exp3_1_0[] __initconst = {
};
static bool __init
__check_eq_uint(const char *srcfile, unsigned int line,
const unsigned int exp_uint, unsigned int x)
__check_eq_ulong(const char *srcfile, unsigned int line,
const unsigned long exp_ulong, unsigned long x)
{
if (exp_uint != x) {
pr_err("[%s:%u] expected %u, got %u\n",
srcfile, line, exp_uint, x);
if (exp_ulong != x) {
pr_err("[%s:%u] expected %lu, got %lu\n",
srcfile, line, exp_ulong, x);
return false;
}
return true;
}
static bool __init
__check_eq_bitmap(const char *srcfile, unsigned int line,
const unsigned long *exp_bmap, const unsigned long *bmap,
@ -185,7 +184,8 @@ __check_eq_str(const char *srcfile, unsigned int line,
result; \
})
#define expect_eq_uint(...) __expect_eq(uint, ##__VA_ARGS__)
#define expect_eq_ulong(...) __expect_eq(ulong, ##__VA_ARGS__)
#define expect_eq_uint(x, y) expect_eq_ulong((unsigned int)(x), (unsigned int)(y))
#define expect_eq_bitmap(...) __expect_eq(bitmap, ##__VA_ARGS__)
#define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__)
#define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__)
@ -548,7 +548,7 @@ static void __init test_bitmap_parselist(void)
}
if (ptest.flags & PARSE_TIME)
pr_err("parselist: %d: input is '%s' OK, Time: %llu\n",
pr_info("parselist: %d: input is '%s' OK, Time: %llu\n",
i, ptest.in, time);
#undef ptest
@ -587,7 +587,7 @@ static void __init test_bitmap_printlist(void)
goto out;
}
pr_err("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time);
pr_info("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time);
out:
kfree(buf);
kfree(bmap);
@ -665,7 +665,7 @@ static void __init test_bitmap_parse(void)
}
if (test.flags & PARSE_TIME)
pr_err("parse: %d: input is '%s' OK, Time: %llu\n",
pr_info("parse: %d: input is '%s' OK, Time: %llu\n",
i, test.in, time);
}
}
@ -1245,14 +1245,7 @@ static void __init test_bitmap_const_eval(void)
* at runtime.
*/
/*
* Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }`.
* Clang on s390 optimizes bitops at compile-time as intended, but at
* the same time stops treating @bitmap and @bitopvar as compile-time
* constants after regular test_bit() is executed, thus triggering the
* build bugs below. So, call const_test_bit() there directly until
* the compiler is fixed.
*/
/* Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }` */
bitmap_clear(bitmap, 0, BITS_PER_LONG);
if (!test_bit(7, bitmap))
bitmap_set(bitmap, 5, 2);
@ -1284,8 +1277,179 @@ static void __init test_bitmap_const_eval(void)
/* ~BIT(25) */
BUILD_BUG_ON(!__builtin_constant_p(~var));
BUILD_BUG_ON(~var != ~BIT(25));
/* ~BIT(25) | BIT(25) == ~0UL */
bitmap_complement(&var, &var, BITS_PER_LONG);
__assign_bit(25, &var, true);
/* !(~(~0UL)) == 1 */
res = bitmap_full(&var, BITS_PER_LONG);
BUILD_BUG_ON(!__builtin_constant_p(res));
BUILD_BUG_ON(!res);
}
/*
* The test bitmap should be big enough to cover the cases where start is not
* in the first word and start + nbits lands in the following word.
*/
#define TEST_BIT_LEN (1000)
/*
* Helper function to test bitmap_write() overwriting the chosen byte pattern.
*/
static void __init test_bitmap_write_helper(const char *pattern)
{
DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
DECLARE_BITMAP(exp_bitmap, TEST_BIT_LEN);
DECLARE_BITMAP(pat_bitmap, TEST_BIT_LEN);
unsigned long w, r, bit;
int i, n, nbits;
/*
* Only parse the pattern once and store the result in the intermediate
* bitmap.
*/
bitmap_parselist(pattern, pat_bitmap, TEST_BIT_LEN);
/*
* Check that writing a single bit does not accidentally touch the
* adjacent bits.
*/
for (i = 0; i < TEST_BIT_LEN; i++) {
bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN);
bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN);
for (bit = 0; bit <= 1; bit++) {
bitmap_write(bitmap, bit, i, 1);
__assign_bit(i, exp_bitmap, bit);
expect_eq_bitmap(exp_bitmap, bitmap,
TEST_BIT_LEN);
}
}
/* Ensure writing 0 bits does not change anything. */
bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN);
bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN);
for (i = 0; i < TEST_BIT_LEN; i++) {
bitmap_write(bitmap, ~0UL, i, 0);
expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN);
}
for (nbits = BITS_PER_LONG; nbits >= 1; nbits--) {
w = IS_ENABLED(CONFIG_64BIT) ? 0xdeadbeefdeadbeefUL
: 0xdeadbeefUL;
w >>= (BITS_PER_LONG - nbits);
for (i = 0; i <= TEST_BIT_LEN - nbits; i++) {
bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN);
bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN);
for (n = 0; n < nbits; n++)
__assign_bit(i + n, exp_bitmap, w & BIT(n));
bitmap_write(bitmap, w, i, nbits);
expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN);
r = bitmap_read(bitmap, i, nbits);
expect_eq_ulong(r, w);
}
}
}
static void __init test_bitmap_read_write(void)
{
unsigned char *pattern[3] = {"", "all:1/2", "all"};
DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
unsigned long zero_bits = 0, bits_per_long = BITS_PER_LONG;
unsigned long val;
int i, pi;
/*
* Reading/writing zero bits should not crash the kernel.
* READ_ONCE() prevents constant folding.
*/
bitmap_write(NULL, 0, 0, READ_ONCE(zero_bits));
/* Return value of bitmap_read() is undefined here. */
bitmap_read(NULL, 0, READ_ONCE(zero_bits));
/*
* Reading/writing more than BITS_PER_LONG bits should not crash the
* kernel. READ_ONCE() prevents constant folding.
*/
bitmap_write(NULL, 0, 0, READ_ONCE(bits_per_long) + 1);
/* Return value of bitmap_read() is undefined here. */
bitmap_read(NULL, 0, READ_ONCE(bits_per_long) + 1);
/*
* Ensure that bitmap_read() reads the same value that was previously
* written, and that two consecutive values are correctly merged.
* The resulting bit pattern is asymmetric, to rule out possible issues
* with bit numbering order.
*/
for (i = 0; i < TEST_BIT_LEN - 7; i++) {
bitmap_zero(bitmap, TEST_BIT_LEN);
bitmap_write(bitmap, 0b10101UL, i, 5);
val = bitmap_read(bitmap, i, 5);
expect_eq_ulong(0b10101UL, val);
bitmap_write(bitmap, 0b101UL, i + 5, 3);
val = bitmap_read(bitmap, i + 5, 3);
expect_eq_ulong(0b101UL, val);
val = bitmap_read(bitmap, i, 8);
expect_eq_ulong(0b10110101UL, val);
}
for (pi = 0; pi < ARRAY_SIZE(pattern); pi++)
test_bitmap_write_helper(pattern[pi]);
}
static void __init test_bitmap_read_perf(void)
{
DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
unsigned int cnt, nbits, i;
unsigned long val;
ktime_t time;
bitmap_fill(bitmap, TEST_BIT_LEN);
time = ktime_get();
for (cnt = 0; cnt < 5; cnt++) {
for (nbits = 1; nbits <= BITS_PER_LONG; nbits++) {
for (i = 0; i < TEST_BIT_LEN; i++) {
if (i + nbits > TEST_BIT_LEN)
break;
/*
* Prevent the compiler from optimizing away the
* bitmap_read() by using its value.
*/
WRITE_ONCE(val, bitmap_read(bitmap, i, nbits));
}
}
}
time = ktime_get() - time;
pr_info("Time spent in %s:\t%llu\n", __func__, time);
}
static void __init test_bitmap_write_perf(void)
{
DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
unsigned int cnt, nbits, i;
unsigned long val = 0xfeedface;
ktime_t time;
bitmap_zero(bitmap, TEST_BIT_LEN);
time = ktime_get();
for (cnt = 0; cnt < 5; cnt++) {
for (nbits = 1; nbits <= BITS_PER_LONG; nbits++) {
for (i = 0; i < TEST_BIT_LEN; i++) {
if (i + nbits > TEST_BIT_LEN)
break;
bitmap_write(bitmap, val, i, nbits);
}
}
}
time = ktime_get() - time;
pr_info("Time spent in %s:\t%llu\n", __func__, time);
}
#undef TEST_BIT_LEN
static void __init selftest(void)
{
test_zero_clear();
@ -1303,6 +1467,9 @@ static void __init selftest(void)
test_bitmap_cut();
test_bitmap_print_buf();
test_bitmap_const_eval();
test_bitmap_read_write();
test_bitmap_read_perf();
test_bitmap_write_perf();
test_find_nth_bit();
test_for_each_set_bit();


@ -65,13 +65,14 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
{
struct metadata_dst *metadata = rtnl_dereference(vlan->tinfo.tunnel_dst);
__be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id));
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
int err;
if (metadata)
return -EEXIST;
metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
key, 0);
__set_bit(IP_TUNNEL_KEY_BIT, flags);
metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, flags, key, 0);
if (!metadata)
return -EINVAL;
@ -185,6 +186,7 @@ void br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
struct net_bridge_vlan *vlan)
{
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct metadata_dst *tunnel_dst;
__be64 tunnel_id;
int err;
@ -202,7 +204,8 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
return err;
if (BR_INPUT_SKB_CB(skb)->backup_nhid) {
tunnel_dst = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
__set_bit(IP_TUNNEL_KEY_BIT, flags);
tunnel_dst = __ip_tun_set_dst(0, 0, 0, 0, 0, flags,
tunnel_id, 0);
if (!tunnel_dst)
return -ENOMEM;


@ -41,4 +41,4 @@ obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
obj-$(CONFIG_BPF_SYSCALL) += sock_map.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
obj-$(CONFIG_OF) += of_net.o
obj-$(CONFIG_NET_TEST) += gso_test.o
obj-$(CONFIG_NET_TEST) += net_test.o


@ -4662,7 +4662,7 @@ set_compat:
to->tunnel_tos = info->key.tos;
to->tunnel_ttl = info->key.ttl;
if (flags & BPF_F_TUNINFO_FLAGS)
to->tunnel_flags = info->key.tun_flags;
to->tunnel_flags = ip_tunnel_flags_to_be16(info->key.tun_flags);
else
to->tunnel_ext = 0;
@ -4705,7 +4705,7 @@ BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
int err;
if (unlikely(!info ||
!(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
!ip_tunnel_is_options_present(info->key.tun_flags))) {
err = -ENOENT;
goto err_clear;
}
@ -4775,15 +4775,15 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
memset(info, 0, sizeof(*info));
info->mode = IP_TUNNEL_INFO_TX;
info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
if (flags & BPF_F_DONT_FRAGMENT)
info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
if (flags & BPF_F_ZERO_CSUM_TX)
info->key.tun_flags &= ~TUNNEL_CSUM;
if (flags & BPF_F_SEQ_NUMBER)
info->key.tun_flags |= TUNNEL_SEQ;
if (flags & BPF_F_NO_TUNNEL_KEY)
info->key.tun_flags &= ~TUNNEL_KEY;
__set_bit(IP_TUNNEL_NOCACHE_BIT, info->key.tun_flags);
__assign_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, info->key.tun_flags,
flags & BPF_F_DONT_FRAGMENT);
__assign_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags,
!(flags & BPF_F_ZERO_CSUM_TX));
__assign_bit(IP_TUNNEL_SEQ_BIT, info->key.tun_flags,
flags & BPF_F_SEQ_NUMBER);
__assign_bit(IP_TUNNEL_KEY_BIT, info->key.tun_flags,
!(flags & BPF_F_NO_TUNNEL_KEY));
info->key.tun_id = cpu_to_be64(from->tunnel_id);
info->key.tos = from->tunnel_tos;
@ -4821,13 +4821,15 @@ BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
{
struct ip_tunnel_info *info = skb_tunnel_info(skb);
const struct metadata_dst *md = this_cpu_ptr(md_dst);
IP_TUNNEL_DECLARE_FLAGS(present) = { };
if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
return -EINVAL;
if (unlikely(size > IP_TUNNEL_OPTS_MAX))
return -ENOMEM;
ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT);
ip_tunnel_set_options_present(present);
ip_tunnel_info_opts_set(info, from, size, present);
return 0;
}


@ -455,17 +455,25 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
struct flow_dissector_key_enc_opts *enc_opt;
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
u32 val;
enc_opt = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_ENC_OPTS,
target_container);
if (info->options_len) {
enc_opt->len = info->options_len;
ip_tunnel_info_opts_get(enc_opt->data, info);
enc_opt->dst_opt_type = info->key.tun_flags &
TUNNEL_OPTIONS_PRESENT;
}
if (!info->options_len)
return;
enc_opt->len = info->options_len;
ip_tunnel_info_opts_get(enc_opt->data, info);
ip_tunnel_set_options_present(flags);
ip_tunnel_flags_and(flags, info->key.tun_flags, flags);
val = find_next_bit(flags, __IP_TUNNEL_FLAG_NUM,
IP_TUNNEL_GENEVE_OPT_BIT);
enc_opt->dst_opt_type = val < __IP_TUNNEL_FLAG_NUM ? val : 0;
}
}
EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);


@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <kunit/test.h>
/* GSO */
#include <linux/skbuff.h>
static const char hdr[] = "abcdefgh";
@ -258,17 +261,127 @@ free_gso_skb:
consume_skb(skb);
}
static struct kunit_case gso_test_cases[] = {
/* IP tunnel flags */
#include <net/ip_tunnels.h>
struct ip_tunnel_flags_test {
const char *name;
const u16 *src_bits;
const u16 *exp_bits;
u8 src_num;
u8 exp_num;
__be16 exp_val;
bool exp_comp;
};
#define IP_TUNNEL_FLAGS_TEST(n, src, comp, eval, exp) { \
.name = (n), \
.src_bits = (src), \
.src_num = ARRAY_SIZE(src), \
.exp_comp = (comp), \
.exp_val = (eval), \
.exp_bits = (exp), \
.exp_num = ARRAY_SIZE(exp), \
}
/* These are __be16-compatible and can be compared as is */
static const u16 ip_tunnel_flags_1[] = {
IP_TUNNEL_KEY_BIT,
IP_TUNNEL_STRICT_BIT,
IP_TUNNEL_ERSPAN_OPT_BIT,
};
/* Due to the previous flags design limitation, setting either
* ``IP_TUNNEL_CSUM_BIT`` (on Big Endian) or ``IP_TUNNEL_DONT_FRAGMENT_BIT``
* (on Little Endian) also sets the VTI/ISATAP bit. In the bitmap
* implementation, both correspond to ``BIT(16)``, which is bigger than
* ``U16_MAX`` but still backward-compatible.
*/
#ifdef __LITTLE_ENDIAN
#define IP_TUNNEL_CONFLICT_BIT IP_TUNNEL_DONT_FRAGMENT_BIT
#else
#define IP_TUNNEL_CONFLICT_BIT IP_TUNNEL_CSUM_BIT
#endif
static const u16 ip_tunnel_flags_2_src[] = {
IP_TUNNEL_CONFLICT_BIT,
};
static const u16 ip_tunnel_flags_2_exp[] = {
IP_TUNNEL_CONFLICT_BIT,
IP_TUNNEL_SIT_ISATAP_BIT,
};
/* Bits 17 and higher are not compatible with __be16 flags */
static const u16 ip_tunnel_flags_3_src[] = {
IP_TUNNEL_VXLAN_OPT_BIT,
17,
18,
20,
};
static const u16 ip_tunnel_flags_3_exp[] = {
IP_TUNNEL_VXLAN_OPT_BIT,
};
static const struct ip_tunnel_flags_test ip_tunnel_flags_test[] = {
IP_TUNNEL_FLAGS_TEST("compat", ip_tunnel_flags_1, true,
cpu_to_be16(BIT(IP_TUNNEL_KEY_BIT) |
BIT(IP_TUNNEL_STRICT_BIT) |
BIT(IP_TUNNEL_ERSPAN_OPT_BIT)),
ip_tunnel_flags_1),
IP_TUNNEL_FLAGS_TEST("conflict", ip_tunnel_flags_2_src, true,
VTI_ISVTI, ip_tunnel_flags_2_exp),
IP_TUNNEL_FLAGS_TEST("new", ip_tunnel_flags_3_src, false,
cpu_to_be16(BIT(IP_TUNNEL_VXLAN_OPT_BIT)),
ip_tunnel_flags_3_exp),
};
static void
ip_tunnel_flags_test_case_to_desc(const struct ip_tunnel_flags_test *t,
char *desc)
{
strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
}
KUNIT_ARRAY_PARAM(ip_tunnel_flags_test, ip_tunnel_flags_test,
ip_tunnel_flags_test_case_to_desc);
static void ip_tunnel_flags_test_run(struct kunit *test)
{
const struct ip_tunnel_flags_test *t = test->param_value;
IP_TUNNEL_DECLARE_FLAGS(src) = { };
IP_TUNNEL_DECLARE_FLAGS(exp) = { };
IP_TUNNEL_DECLARE_FLAGS(out);
for (u32 j = 0; j < t->src_num; j++)
__set_bit(t->src_bits[j], src);
for (u32 j = 0; j < t->exp_num; j++)
__set_bit(t->exp_bits[j], exp);
KUNIT_ASSERT_EQ(test, t->exp_comp,
ip_tunnel_flags_is_be16_compat(src));
KUNIT_ASSERT_EQ(test, (__force u16)t->exp_val,
(__force u16)ip_tunnel_flags_to_be16(src));
ip_tunnel_flags_from_be16(out, t->exp_val);
KUNIT_ASSERT_TRUE(test, __ipt_flag_op(bitmap_equal, exp, out));
}
static struct kunit_case net_test_cases[] = {
KUNIT_CASE_PARAM(gso_test_func, gso_test_gen_params),
{}
KUNIT_CASE_PARAM(ip_tunnel_flags_test_run,
ip_tunnel_flags_test_gen_params),
{ },
};
static struct kunit_suite gso_test_suite = {
.name = "net_core_gso",
.test_cases = gso_test_cases,
static struct kunit_suite net_test_suite = {
.name = "net_core",
.test_cases = net_test_cases,
};
kunit_test_suite(net_test_suite);
kunit_test_suite(gso_test_suite);
MODULE_DESCRIPTION("KUnit tests for networking core");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("KUnit tests for segmentation offload");


@ -64,7 +64,7 @@ __bpf_kfunc int bpf_skb_set_fou_encap(struct __sk_buff *skb_ctx,
info->encap.type = TUNNEL_ENCAP_NONE;
}
if (info->key.tun_flags & TUNNEL_CSUM)
if (test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags))
info->encap.flags |= TUNNEL_ENCAP_FLAG_CSUM;
info->encap.sport = encap->sport;


@ -73,7 +73,7 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
return -EINVAL;
tpi->flags = gre_flags_to_tnl_flags(greh->flags);
gre_flags_to_tnl_flags(tpi->flags, greh->flags);
hdr_len = gre_calc_hlen(tpi->flags);
if (!pskb_may_pull(skb, nhs + hdr_len))


@ -265,6 +265,7 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
struct net *net = dev_net(skb->dev);
struct metadata_dst *tun_dst = NULL;
struct erspan_base_hdr *ershdr;
IP_TUNNEL_DECLARE_FLAGS(flags);
struct ip_tunnel_net *itn;
struct ip_tunnel *tunnel;
const struct iphdr *iph;
@ -272,18 +273,20 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
int ver;
int len;
ip_tunnel_flags_copy(flags, tpi->flags);
itn = net_generic(net, erspan_net_id);
iph = ip_hdr(skb);
if (is_erspan_type1(gre_hdr_len)) {
ver = 0;
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
tpi->flags | TUNNEL_NO_KEY,
__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
iph->saddr, iph->daddr, 0);
} else {
ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
ver = ershdr->ver;
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
tpi->flags | TUNNEL_KEY,
__set_bit(IP_TUNNEL_KEY_BIT, flags);
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
iph->saddr, iph->daddr, tpi->key);
}
@ -307,10 +310,9 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
struct ip_tunnel_info *info;
unsigned char *gh;
__be64 tun_id;
__be16 flags;
tpi->flags |= TUNNEL_KEY;
flags = tpi->flags;
__set_bit(IP_TUNNEL_KEY_BIT, tpi->flags);
ip_tunnel_flags_copy(flags, tpi->flags);
tun_id = key32_to_tunnel_id(tpi->key);
tun_dst = ip_tun_rx_dst(skb, flags,
@ -333,7 +335,8 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
ERSPAN_V2_MDSIZE);
info = &tun_dst->u.tun_info;
info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
info->key.tun_flags);
info->options_len = sizeof(*md);
}
@ -376,10 +379,13 @@ static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
tnl_params = &tunnel->parms.iph;
if (tunnel->collect_md || tnl_params->daddr == 0) {
__be16 flags;
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
__be64 tun_id;
flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
__set_bit(IP_TUNNEL_CSUM_BIT, flags);
__set_bit(IP_TUNNEL_KEY_BIT, flags);
ip_tunnel_flags_and(flags, tpi->flags, flags);
tun_id = key32_to_tunnel_id(tpi->key);
tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
if (!tun_dst)
@ -459,12 +465,15 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
__be16 proto)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
__be16 flags = tunnel->parms.o_flags;
IP_TUNNEL_DECLARE_FLAGS(flags);
ip_tunnel_flags_copy(flags, tunnel->parms.o_flags);
/* Push GRE header. */
gre_build_header(skb, tunnel->tun_hlen,
flags, proto, tunnel->parms.o_key,
(flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
@ -478,10 +487,10 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
__be16 proto)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
int tunnel_hlen;
__be16 flags;
tun_info = skb_tunnel_info(skb);
if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
@ -495,14 +504,19 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
goto err_free_skb;
/* Push Tunnel header. */
if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
tunnel->parms.o_flags)))
goto err_free_skb;
flags = tun_info->key.tun_flags &
(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
__set_bit(IP_TUNNEL_CSUM_BIT, flags);
__set_bit(IP_TUNNEL_KEY_BIT, flags);
__set_bit(IP_TUNNEL_SEQ_BIT, flags);
ip_tunnel_flags_and(flags, tun_info->key.tun_flags, flags);
gre_build_header(skb, tunnel_hlen, flags, proto,
tunnel_id_to_key32(tun_info->key.tun_id),
(flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
@ -516,6 +530,7 @@ err_free_skb:
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
struct erspan_metadata *md;
@ -531,7 +546,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
goto err_free_skb;
key = &tun_info->key;
if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
if (!test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_info->key.tun_flags))
goto err_free_skb;
if (tun_info->options_len < sizeof(*md))
goto err_free_skb;
@ -584,8 +599,9 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
goto err_free_skb;
}
gre_build_header(skb, 8, TUNNEL_SEQ,
proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));
__set_bit(IP_TUNNEL_SEQ_BIT, flags);
gre_build_header(skb, 8, flags, proto, 0,
htonl(atomic_fetch_inc(&tunnel->o_seqno)));
ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
@ -659,7 +675,8 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
tnl_params = &tunnel->parms.iph;
}
if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
tunnel->parms.o_flags)))
goto free_skb;
__gre_xmit(skb, dev, tnl_params, skb->protocol);
@ -701,7 +718,7 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
/* Push ERSPAN header */
if (tunnel->erspan_ver == 0) {
proto = htons(ETH_P_ERSPAN);
tunnel->parms.o_flags &= ~TUNNEL_SEQ;
__clear_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags);
} else if (tunnel->erspan_ver == 1) {
erspan_build_header(skb, ntohl(tunnel->parms.o_key),
tunnel->index,
@ -716,7 +733,7 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
goto free_skb;
}
tunnel->parms.o_flags &= ~TUNNEL_KEY;
__clear_bit(IP_TUNNEL_KEY_BIT, tunnel->parms.o_flags);
__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
return NETDEV_TX_OK;
@ -739,7 +756,8 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
tunnel->parms.o_flags)))
goto free_skb;
if (skb_cow_head(skb, dev->needed_headroom))
@ -757,7 +775,6 @@ free_skb:
static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
__be16 flags;
int len;
len = tunnel->tun_hlen;
@ -773,10 +790,9 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
if (set_mtu)
dev->mtu = max_t(int, dev->mtu - len, 68);
flags = tunnel->parms.o_flags;
if (flags & TUNNEL_SEQ ||
(flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags) ||
(test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
dev->features &= ~NETIF_F_GSO_SOFTWARE;
dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
} else {
@ -785,20 +801,29 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
}
}
static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p,
static int ipgre_tunnel_ctl(struct net_device *dev,
struct ip_tunnel_parm_kern *p,
int cmd)
{
__be16 i_flags, o_flags;
int err;
if (!ip_tunnel_flags_is_be16_compat(p->i_flags) ||
!ip_tunnel_flags_is_be16_compat(p->o_flags))
return -EOVERFLOW;
i_flags = ip_tunnel_flags_to_be16(p->i_flags);
o_flags = ip_tunnel_flags_to_be16(p->o_flags);
if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
((p->i_flags | p->o_flags) & (GRE_VERSION | GRE_ROUTING)))
((i_flags | o_flags) & (GRE_VERSION | GRE_ROUTING)))
return -EINVAL;
}
p->i_flags = gre_flags_to_tnl_flags(p->i_flags);
p->o_flags = gre_flags_to_tnl_flags(p->o_flags);
gre_flags_to_tnl_flags(p->i_flags, i_flags);
gre_flags_to_tnl_flags(p->o_flags, o_flags);
err = ip_tunnel_ctl(dev, p, cmd);
if (err)
@ -807,15 +832,18 @@ static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p,
if (cmd == SIOCCHGTUNNEL) {
struct ip_tunnel *t = netdev_priv(dev);
t->parms.i_flags = p->i_flags;
t->parms.o_flags = p->o_flags;
ip_tunnel_flags_copy(t->parms.i_flags, p->i_flags);
ip_tunnel_flags_copy(t->parms.o_flags, p->o_flags);
if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
ipgre_link_update(dev, true);
}
p->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
p->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
ip_tunnel_flags_from_be16(p->i_flags, i_flags);
o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
ip_tunnel_flags_from_be16(p->o_flags, o_flags);
return 0;
}
@ -955,7 +983,6 @@ static void ipgre_tunnel_setup(struct net_device *dev)
static void __gre_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel;
__be16 flags;
tunnel = netdev_priv(dev);
tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
@ -967,14 +994,13 @@ static void __gre_tunnel_init(struct net_device *dev)
dev->features |= GRE_FEATURES | NETIF_F_LLTX;
dev->hw_features |= GRE_FEATURES;
flags = tunnel->parms.o_flags;
/* TCP offload with GRE SEQ is not supported, nor can we support 2
* levels of outer headers requiring an update.
*/
if (flags & TUNNEL_SEQ)
if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags))
return;
if (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)
if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
tunnel->encap.type != TUNNEL_ENCAP_NONE)
return;
dev->features |= NETIF_F_GSO_SOFTWARE;
@ -1131,7 +1157,7 @@ static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
static int ipgre_netlink_parms(struct net_device *dev,
struct nlattr *data[],
struct nlattr *tb[],
struct ip_tunnel_parm *parms,
struct ip_tunnel_parm_kern *parms,
__u32 *fwmark)
{
struct ip_tunnel *t = netdev_priv(dev);
@ -1147,10 +1173,12 @@ static int ipgre_netlink_parms(struct net_device *dev,
parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
if (data[IFLA_GRE_IFLAGS])
parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));
gre_flags_to_tnl_flags(parms->i_flags,
nla_get_be16(data[IFLA_GRE_IFLAGS]));
if (data[IFLA_GRE_OFLAGS])
parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));
gre_flags_to_tnl_flags(parms->o_flags,
nla_get_be16(data[IFLA_GRE_OFLAGS]));
if (data[IFLA_GRE_IKEY])
parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
@ -1198,7 +1226,7 @@ static int ipgre_netlink_parms(struct net_device *dev,
static int erspan_netlink_parms(struct net_device *dev,
struct nlattr *data[],
struct nlattr *tb[],
struct ip_tunnel_parm *parms,
struct ip_tunnel_parm_kern *parms,
__u32 *fwmark)
{
struct ip_tunnel *t = netdev_priv(dev);
@ -1357,7 +1385,7 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel_parm p;
struct ip_tunnel_parm_kern p;
__u32 fwmark = 0;
int err;
@ -1375,7 +1403,7 @@ static int erspan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel_parm p;
struct ip_tunnel_parm_kern p;
__u32 fwmark = 0;
int err;
@ -1394,8 +1422,8 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm_kern p;
__u32 fwmark = t->fwmark;
struct ip_tunnel_parm p;
int err;
err = ipgre_newlink_encap_setup(dev, data);
@ -1410,8 +1438,8 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
if (err < 0)
return err;
t->parms.i_flags = p.i_flags;
t->parms.o_flags = p.o_flags;
ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);
ipgre_link_update(dev, !tb[IFLA_MTU]);
@ -1423,8 +1451,8 @@ static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm_kern p;
__u32 fwmark = t->fwmark;
struct ip_tunnel_parm p;
int err;
err = ipgre_newlink_encap_setup(dev, data);
@ -1439,8 +1467,8 @@ static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
if (err < 0)
return err;
t->parms.i_flags = p.i_flags;
t->parms.o_flags = p.o_flags;
ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);
return 0;
}
@ -1496,8 +1524,10 @@ static size_t ipgre_get_size(const struct net_device *dev)
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm *p = &t->parms;
__be16 o_flags = p->o_flags;
struct ip_tunnel_parm_kern *p = &t->parms;
IP_TUNNEL_DECLARE_FLAGS(o_flags);
ip_tunnel_flags_copy(o_flags, p->o_flags);
if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
nla_put_be16(skb, IFLA_GRE_IFLAGS,
@ -1545,7 +1575,7 @@ static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (t->erspan_ver <= 2) {
if (t->erspan_ver != 0 && !t->collect_md)
t->parms.o_flags |= TUNNEL_KEY;
__set_bit(IP_TUNNEL_KEY_BIT, t->parms.o_flags);
if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
goto nla_put_failure;


@ -56,17 +56,13 @@ static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
IP_TNL_HASH_BITS);
}
static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
__be16 flags, __be32 key)
static bool ip_tunnel_key_match(const struct ip_tunnel_parm_kern *p,
const unsigned long *flags, __be32 key)
{
if (p->i_flags & TUNNEL_KEY) {
if (flags & TUNNEL_KEY)
return key == p->i_key;
else
/* key expected, none present */
return false;
} else
return !(flags & TUNNEL_KEY);
if (!test_bit(IP_TUNNEL_KEY_BIT, flags))
return !test_bit(IP_TUNNEL_KEY_BIT, p->i_flags);
return test_bit(IP_TUNNEL_KEY_BIT, p->i_flags) && p->i_key == key;
}
/* Fallback tunnel: no source, no destination, no key, no options
@ -81,7 +77,7 @@ static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
Given src, dst and key, find appropriate for input tunnel.
*/
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
int link, __be16 flags,
int link, const unsigned long *flags,
__be32 remote, __be32 local,
__be32 key)
{
@ -143,7 +139,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
}
hlist_for_each_entry_rcu(t, head, hash_node) {
if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) ||
if ((!test_bit(IP_TUNNEL_NO_KEY_BIT, flags) &&
t->parms.i_key != key) ||
t->parms.iph.saddr != 0 ||
t->parms.iph.daddr != 0 ||
!(t->dev->flags & IFF_UP))
@ -171,7 +168,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
struct ip_tunnel_parm *parms)
struct ip_tunnel_parm_kern *parms)
{
unsigned int h;
__be32 remote;
@ -182,7 +179,8 @@ static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
else
remote = 0;
if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
if (!test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags) &&
test_bit(IP_TUNNEL_VTI_BIT, parms->i_flags))
i_key = 0;
h = ip_tunnel_hash(i_key, remote);
@ -206,17 +204,19 @@ static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
}
static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
struct ip_tunnel_parm *parms,
struct ip_tunnel_parm_kern *parms,
int type)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
IP_TUNNEL_DECLARE_FLAGS(flags);
__be32 key = parms->i_key;
__be16 flags = parms->i_flags;
int link = parms->link;
struct ip_tunnel *t = NULL;
struct hlist_head *head = ip_bucket(itn, parms);
ip_tunnel_flags_copy(flags, parms->i_flags);
hlist_for_each_entry_rcu(t, head, hash_node) {
if (local == t->parms.iph.saddr &&
remote == t->parms.iph.daddr &&
@ -230,7 +230,7 @@ static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
static struct net_device *__ip_tunnel_create(struct net *net,
const struct rtnl_link_ops *ops,
struct ip_tunnel_parm *parms)
struct ip_tunnel_parm_kern *parms)
{
int err;
struct ip_tunnel *tunnel;
@ -326,7 +326,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
static struct ip_tunnel *ip_tunnel_create(struct net *net,
struct ip_tunnel_net *itn,
struct ip_tunnel_parm *parms)
struct ip_tunnel_parm_kern *parms)
{
struct ip_tunnel *nt;
struct net_device *dev;
@ -386,15 +386,15 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
}
#endif
if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.i_flags) !=
test_bit(IP_TUNNEL_CSUM_BIT, tpi->flags)) {
DEV_STATS_INC(tunnel->dev, rx_crc_errors);
DEV_STATS_INC(tunnel->dev, rx_errors);
goto drop;
}
if (tunnel->parms.i_flags&TUNNEL_SEQ) {
if (!(tpi->flags&TUNNEL_SEQ) ||
if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.i_flags)) {
if (!test_bit(IP_TUNNEL_SEQ_BIT, tpi->flags) ||
(tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
DEV_STATS_INC(tunnel->dev, rx_fifo_errors);
DEV_STATS_INC(tunnel->dev, rx_errors);
@ -638,7 +638,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags))
df = htons(IP_DF);
if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, tunnel_hlen,
key->u.ipv4.dst, true)) {
@ -871,7 +871,7 @@ EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
static void ip_tunnel_update(struct ip_tunnel_net *itn,
struct ip_tunnel *t,
struct net_device *dev,
struct ip_tunnel_parm *p,
struct ip_tunnel_parm_kern *p,
bool set_mtu,
__u32 fwmark)
{
@ -903,7 +903,8 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
netdev_state_change(dev);
}
int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p,
int cmd)
{
int err = 0;
struct ip_tunnel *t = netdev_priv(dev);
@ -927,10 +928,10 @@ int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
goto done;
if (p->iph.ttl)
p->iph.frag_off |= htons(IP_DF);
if (!(p->i_flags & VTI_ISVTI)) {
if (!(p->i_flags & TUNNEL_KEY))
if (!test_bit(IP_TUNNEL_VTI_BIT, p->i_flags)) {
if (!test_bit(IP_TUNNEL_KEY_BIT, p->i_flags))
p->i_key = 0;
if (!(p->o_flags & TUNNEL_KEY))
if (!test_bit(IP_TUNNEL_KEY_BIT, p->o_flags))
p->o_key = 0;
}
@ -1005,16 +1006,56 @@ done:
}
EXPORT_SYMBOL_GPL(ip_tunnel_ctl);
bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp,
const void __user *data)
{
struct ip_tunnel_parm p;
if (copy_from_user(&p, data, sizeof(p)))
return false;
strscpy(kp->name, p.name);
kp->link = p.link;
ip_tunnel_flags_from_be16(kp->i_flags, p.i_flags);
ip_tunnel_flags_from_be16(kp->o_flags, p.o_flags);
kp->i_key = p.i_key;
kp->o_key = p.o_key;
memcpy(&kp->iph, &p.iph, min(sizeof(kp->iph), sizeof(p.iph)));
return true;
}
EXPORT_SYMBOL_GPL(ip_tunnel_parm_from_user);
bool ip_tunnel_parm_to_user(void __user *data, struct ip_tunnel_parm_kern *kp)
{
struct ip_tunnel_parm p;
if (!ip_tunnel_flags_is_be16_compat(kp->i_flags) ||
!ip_tunnel_flags_is_be16_compat(kp->o_flags))
return false;
strscpy(p.name, kp->name);
p.link = kp->link;
p.i_flags = ip_tunnel_flags_to_be16(kp->i_flags);
p.o_flags = ip_tunnel_flags_to_be16(kp->o_flags);
p.i_key = kp->i_key;
p.o_key = kp->o_key;
memcpy(&p.iph, &kp->iph, min(sizeof(p.iph), sizeof(kp->iph)));
return !copy_to_user(data, &p, sizeof(p));
}
EXPORT_SYMBOL_GPL(ip_tunnel_parm_to_user);
int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *data, int cmd)
{
struct ip_tunnel_parm p;
struct ip_tunnel_parm_kern p;
int err;
if (copy_from_user(&p, data, sizeof(p)))
if (!ip_tunnel_parm_from_user(&p, data))
return -EFAULT;
err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, cmd);
if (!err && copy_to_user(data, &p, sizeof(p)))
if (!err && !ip_tunnel_parm_to_user(data, &p))
return -EFAULT;
return err;
}
@ -1093,7 +1134,7 @@ int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname)
{
struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
struct ip_tunnel_parm parms;
struct ip_tunnel_parm_kern parms;
unsigned int i;
itn->rtnl_link_ops = ops;
@ -1171,7 +1212,7 @@ void ip_tunnel_delete_nets(struct list_head *net_list, unsigned int id,
EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p, __u32 fwmark)
struct ip_tunnel_parm_kern *p, __u32 fwmark)
{
struct ip_tunnel *nt;
struct net *net = dev_net(dev);
@ -1225,7 +1266,7 @@ err_register_netdevice:
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p, __u32 fwmark)
struct ip_tunnel_parm_kern *p, __u32 fwmark)
{
struct ip_tunnel *t;
struct ip_tunnel *tunnel = netdev_priv(dev);

View File

@ -125,6 +125,7 @@ EXPORT_SYMBOL_GPL(__iptunnel_pull_header);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
gfp_t flags)
{
IP_TUNNEL_DECLARE_FLAGS(tun_flags) = { };
struct metadata_dst *res;
struct ip_tunnel_info *dst, *src;
@ -144,10 +145,10 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
sizeof(struct in6_addr));
else
dst->key.u.ipv4.dst = src->key.u.ipv4.src;
dst->key.tun_flags = src->key.tun_flags;
ip_tunnel_flags_copy(dst->key.tun_flags, src->key.tun_flags);
dst->mode = src->mode | IP_TUNNEL_INFO_TX;
ip_tunnel_info_opts_set(dst, ip_tunnel_info_opts(src),
src->options_len, 0);
src->options_len, tun_flags);
return res;
}
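
IP_TUNNEL_DECLARE_FLAGS(), used here and throughout the series, declares the on-stack bitmap that replaces the old __be16 flags word. A user-space model of the declare/set/test pattern, with local stand-ins for DECLARE_BITMAP(), __set_bit() and test_bit() (bit positions and the flag count below are illustrative):

  /* build: cc -Wall -o flagmap_model flagmap_model.c */
  #include <stdio.h>

  #define FLAG_NUM      32   /* stand-in for __IP_TUNNEL_FLAG_NUM */
  #define BITS_PER_LONG (8 * sizeof(unsigned long))
  #define DECLARE_FLAGS(name) \
          unsigned long name[(FLAG_NUM + BITS_PER_LONG - 1) / BITS_PER_LONG]

  static void set_bit_(int nr, unsigned long *map)
  {
          map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
  }

  static int test_bit_(int nr, const unsigned long *map)
  {
          return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
  }

  enum { KEY_BIT, CSUM_BIT, SEQ_BIT };  /* illustrative positions only */

  int main(void)
  {
          DECLARE_FLAGS(flags) = { 0 };  /* == IP_TUNNEL_DECLARE_FLAGS(flags) = { }; */

          set_bit_(CSUM_BIT, flags);     /* __set_bit(IP_TUNNEL_CSUM_BIT, flags); */
          printf("csum=%d seq=%d\n",
                 test_bit_(CSUM_BIT, flags), test_bit_(SEQ_BIT, flags));
          return 0;
  }
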
@ -497,7 +498,7 @@ static int ip_tun_parse_opts_geneve(struct nlattr *attr,
opt->opt_class = nla_get_be16(attr);
attr = tb[LWTUNNEL_IP_OPT_GENEVE_TYPE];
opt->type = nla_get_u8(attr);
info->key.tun_flags |= TUNNEL_GENEVE_OPT;
__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags);
}
return sizeof(struct geneve_opt) + data_len;
@ -525,7 +526,7 @@ static int ip_tun_parse_opts_vxlan(struct nlattr *attr,
attr = tb[LWTUNNEL_IP_OPT_VXLAN_GBP];
md->gbp = nla_get_u32(attr);
md->gbp &= VXLAN_GBP_MASK;
info->key.tun_flags |= TUNNEL_VXLAN_OPT;
__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags);
}
return sizeof(struct vxlan_metadata);
@ -574,7 +575,7 @@ static int ip_tun_parse_opts_erspan(struct nlattr *attr,
set_hwid(&md->u.md2, nla_get_u8(attr));
}
info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, info->key.tun_flags);
}
return sizeof(struct erspan_metadata);
@ -585,7 +586,7 @@ static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info,
{
int err, rem, opt_len, opts_len = 0;
struct nlattr *nla;
__be16 type = 0;
u32 type = 0;
if (!attr)
return 0;
@ -598,7 +599,7 @@ static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info,
nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
switch (nla_type(nla)) {
case LWTUNNEL_IP_OPTS_GENEVE:
if (type && type != TUNNEL_GENEVE_OPT)
if (type && type != IP_TUNNEL_GENEVE_OPT_BIT)
return -EINVAL;
opt_len = ip_tun_parse_opts_geneve(nla, info, opts_len,
extack);
@ -607,7 +608,7 @@ static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info,
opts_len += opt_len;
if (opts_len > IP_TUNNEL_OPTS_MAX)
return -EINVAL;
type = TUNNEL_GENEVE_OPT;
type = IP_TUNNEL_GENEVE_OPT_BIT;
break;
case LWTUNNEL_IP_OPTS_VXLAN:
if (type)
@ -617,7 +618,7 @@ static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info,
if (opt_len < 0)
return opt_len;
opts_len += opt_len;
type = TUNNEL_VXLAN_OPT;
type = IP_TUNNEL_VXLAN_OPT_BIT;
break;
case LWTUNNEL_IP_OPTS_ERSPAN:
if (type)
@ -627,7 +628,7 @@ static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info,
if (opt_len < 0)
return opt_len;
opts_len += opt_len;
type = TUNNEL_ERSPAN_OPT;
type = IP_TUNNEL_ERSPAN_OPT_BIT;
break;
default:
return -EINVAL;
@ -705,10 +706,16 @@ static int ip_tun_build_state(struct net *net, struct nlattr *attr,
if (tb[LWTUNNEL_IP_TOS])
tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
if (tb[LWTUNNEL_IP_FLAGS])
tun_info->key.tun_flags |=
(nla_get_be16(tb[LWTUNNEL_IP_FLAGS]) &
~TUNNEL_OPTIONS_PRESENT);
if (tb[LWTUNNEL_IP_FLAGS]) {
IP_TUNNEL_DECLARE_FLAGS(flags);
ip_tunnel_flags_from_be16(flags,
nla_get_be16(tb[LWTUNNEL_IP_FLAGS]));
ip_tunnel_clear_options_present(flags);
ip_tunnel_flags_or(tun_info->key.tun_flags,
tun_info->key.tun_flags, flags);
}
tun_info->mode = IP_TUNNEL_INFO_TX;
tun_info->options_len = opt_len;
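
The replacement block masks the options-present bits out of whatever userspace put in LWTUNNEL_IP_FLAGS, since only the option parsers above are allowed to set them. A rough model of that clearing step, assuming the present-set is still the per-type option bits (GENEVE, VXLAN, ERSPAN, GTP and, with this series, PFCP; the positions below are illustrative):

  /* build: cc -Wall -o optmask_model optmask_model.c */
  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative bit positions; the real values live in the UAPI enum. */
  enum { GENEVE_OPT = 11, VXLAN_OPT = 12, ERSPAN_OPT = 14,
         GTP_OPT = 15, PFCP_OPT = 17 };

  #define OPTIONS_PRESENT ((1ULL << GENEVE_OPT) | (1ULL << VXLAN_OPT) | \
                           (1ULL << ERSPAN_OPT) | (1ULL << GTP_OPT)   | \
                           (1ULL << PFCP_OPT))

  int main(void)
  {
          uint64_t flags = (1ULL << VXLAN_OPT) | 1;  /* user claims opts */

          flags &= ~OPTIONS_PRESENT;   /* ip_tunnel_clear_options_present() */
          printf("flags=%#llx\n", (unsigned long long)flags); /* opts bit gone */
          return 0;
  }
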
@ -812,18 +819,18 @@ static int ip_tun_fill_encap_opts(struct sk_buff *skb, int type,
struct nlattr *nest;
int err = 0;
if (!(tun_info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))
if (!ip_tunnel_is_options_present(tun_info->key.tun_flags))
return 0;
nest = nla_nest_start_noflag(skb, type);
if (!nest)
return -ENOMEM;
if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT)
if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_info->key.tun_flags))
err = ip_tun_fill_encap_opts_geneve(skb, tun_info);
else if (tun_info->key.tun_flags & TUNNEL_VXLAN_OPT)
else if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_info->key.tun_flags))
err = ip_tun_fill_encap_opts_vxlan(skb, tun_info);
else if (tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)
else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_info->key.tun_flags))
err = ip_tun_fill_encap_opts_erspan(skb, tun_info);
if (err) {
@ -846,7 +853,8 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb,
nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags) ||
nla_put_be16(skb, LWTUNNEL_IP_FLAGS,
ip_tunnel_flags_to_be16(tun_info->key.tun_flags)) ||
ip_tun_fill_encap_opts(skb, LWTUNNEL_IP_OPTS, tun_info))
return -ENOMEM;
@ -857,11 +865,11 @@ static int ip_tun_opts_nlsize(struct ip_tunnel_info *info)
{
int opt_len;
if (!(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))
if (!ip_tunnel_is_options_present(info->key.tun_flags))
return 0;
opt_len = nla_total_size(0); /* LWTUNNEL_IP_OPTS */
if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags)) {
struct geneve_opt *opt;
int offset = 0;
@ -874,10 +882,10 @@ static int ip_tun_opts_nlsize(struct ip_tunnel_info *info)
/* OPT_GENEVE_DATA */
offset += sizeof(*opt) + opt->length * 4;
}
} else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
} else if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags)) {
opt_len += nla_total_size(0) /* LWTUNNEL_IP_OPTS_VXLAN */
+ nla_total_size(4); /* OPT_VXLAN_GBP */
} else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
} else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, info->key.tun_flags)) {
struct erspan_metadata *md = ip_tunnel_info_opts(info);
opt_len += nla_total_size(0) /* LWTUNNEL_IP_OPTS_ERSPAN */
@ -984,10 +992,17 @@ static int ip6_tun_build_state(struct net *net, struct nlattr *attr,
if (tb[LWTUNNEL_IP6_TC])
tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
if (tb[LWTUNNEL_IP6_FLAGS])
tun_info->key.tun_flags |=
(nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]) &
~TUNNEL_OPTIONS_PRESENT);
if (tb[LWTUNNEL_IP6_FLAGS]) {
IP_TUNNEL_DECLARE_FLAGS(flags);
__be16 data;
data = nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]);
ip_tunnel_flags_from_be16(flags, data);
ip_tunnel_clear_options_present(flags);
ip_tunnel_flags_or(tun_info->key.tun_flags,
tun_info->key.tun_flags, flags);
}
tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6;
tun_info->options_len = opt_len;
@ -1008,7 +1023,8 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb,
nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags) ||
nla_put_be16(skb, LWTUNNEL_IP6_FLAGS,
ip_tunnel_flags_to_be16(tun_info->key.tun_flags)) ||
ip_tun_fill_encap_opts(skb, LWTUNNEL_IP6_OPTS, tun_info))
return -ENOMEM;
@ -1116,7 +1132,7 @@ bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
EXPORT_SYMBOL_GPL(ip_tunnel_netlink_encap_parms);
void ip_tunnel_netlink_parms(struct nlattr *data[],
struct ip_tunnel_parm *parms)
struct ip_tunnel_parm_kern *parms)
{
if (data[IFLA_IPTUN_LINK])
parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
@ -1139,8 +1155,12 @@ void ip_tunnel_netlink_parms(struct nlattr *data[],
if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
parms->iph.frag_off = htons(IP_DF);
if (data[IFLA_IPTUN_FLAGS])
parms->i_flags = nla_get_be16(data[IFLA_IPTUN_FLAGS]);
if (data[IFLA_IPTUN_FLAGS]) {
__be16 flags;
flags = nla_get_be16(data[IFLA_IPTUN_FLAGS]);
ip_tunnel_flags_from_be16(parms->i_flags, flags);
}
if (data[IFLA_IPTUN_PROTO])
parms->iph.protocol = nla_get_u8(data[IFLA_IPTUN_PROTO]);

View File

@ -51,8 +51,11 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
const struct iphdr *iph = ip_hdr(skb);
struct net *net = dev_net(skb->dev);
struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
iph->saddr, iph->daddr, 0);
if (tunnel) {
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
@ -167,7 +170,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
struct flowi *fl)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct ip_tunnel_parm *parms = &tunnel->parms;
struct ip_tunnel_parm_kern *parms = &tunnel->parms;
struct dst_entry *dst = skb_dst(skb);
struct net_device *tdev; /* Device to other host */
int pkt_len = skb->len;
@ -322,8 +325,11 @@ static int vti4_err(struct sk_buff *skb, u32 info)
const struct iphdr *iph = (const struct iphdr *)skb->data;
int protocol = iph->protocol;
struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
iph->daddr, iph->saddr, 0);
if (!tunnel)
return -1;
@ -373,8 +379,9 @@ static int vti4_err(struct sk_buff *skb, u32 info)
}
static int
vti_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
vti_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, int cmd)
{
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
int err = 0;
if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
@ -383,20 +390,26 @@ vti_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
return -EINVAL;
}
if (!(p->i_flags & GRE_KEY))
if (!ip_tunnel_flags_is_be16_compat(p->i_flags) ||
!ip_tunnel_flags_is_be16_compat(p->o_flags))
return -EOVERFLOW;
if (!(ip_tunnel_flags_to_be16(p->i_flags) & GRE_KEY))
p->i_key = 0;
if (!(p->o_flags & GRE_KEY))
if (!(ip_tunnel_flags_to_be16(p->o_flags) & GRE_KEY))
p->o_key = 0;
p->i_flags = VTI_ISVTI;
__set_bit(IP_TUNNEL_VTI_BIT, flags);
ip_tunnel_flags_copy(p->i_flags, flags);
err = ip_tunnel_ctl(dev, p, cmd);
if (err)
return err;
if (cmd != SIOCDELTUNNEL) {
p->i_flags |= GRE_KEY;
p->o_flags |= GRE_KEY;
ip_tunnel_flags_from_be16(flags, GRE_KEY);
ip_tunnel_flags_or(p->i_flags, p->i_flags, flags);
ip_tunnel_flags_or(p->o_flags, p->o_flags, flags);
}
return 0;
}
@ -531,7 +544,7 @@ static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
}
static void vti_netlink_parms(struct nlattr *data[],
struct ip_tunnel_parm *parms,
struct ip_tunnel_parm_kern *parms,
__u32 *fwmark)
{
memset(parms, 0, sizeof(*parms));
@ -541,7 +554,7 @@ static void vti_netlink_parms(struct nlattr *data[],
if (!data)
return;
parms->i_flags = VTI_ISVTI;
__set_bit(IP_TUNNEL_VTI_BIT, parms->i_flags);
if (data[IFLA_VTI_LINK])
parms->link = nla_get_u32(data[IFLA_VTI_LINK]);
@ -566,7 +579,7 @@ static int vti_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel_parm parms;
struct ip_tunnel_parm_kern parms;
__u32 fwmark = 0;
vti_netlink_parms(data, &parms, &fwmark);
@ -578,8 +591,8 @@ static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm_kern p;
__u32 fwmark = t->fwmark;
struct ip_tunnel_parm p;
vti_netlink_parms(data, &p, &fwmark);
return ip_tunnel_changelink(dev, tb, &p, fwmark);
@ -606,7 +619,7 @@ static size_t vti_get_size(const struct net_device *dev)
static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm *p = &t->parms;
struct ip_tunnel_parm_kern *p = &t->parms;
if (nla_put_u32(skb, IFLA_VTI_LINK, p->link) ||
nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key) ||

View File

@ -130,13 +130,16 @@ static int ipip_err(struct sk_buff *skb, u32 info)
struct net *net = dev_net(skb->dev);
struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
const struct iphdr *iph = (const struct iphdr *)skb->data;
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct ip_tunnel *t;
int err = 0;
t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
iph->daddr, iph->saddr, 0);
__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
t = ip_tunnel_lookup(itn, skb->dev->ifindex, flags, iph->daddr,
iph->saddr, 0);
if (!t) {
err = -ENOENT;
goto out;
@ -213,13 +216,16 @@ static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
{
struct net *net = dev_net(skb->dev);
struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct metadata_dst *tun_dst = NULL;
struct ip_tunnel *tunnel;
const struct iphdr *iph;
__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
iph = ip_hdr(skb);
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
iph->saddr, iph->daddr, 0);
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags, iph->saddr,
iph->daddr, 0);
if (tunnel) {
const struct tnl_ptk_info *tpi;
@ -238,7 +244,9 @@ static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
if (iptunnel_pull_header(skb, 0, tpi->proto, false))
goto drop;
if (tunnel->collect_md) {
tun_dst = ip_tun_rx_dst(skb, 0, 0, 0);
ip_tunnel_flags_zero(flags);
tun_dst = ip_tun_rx_dst(skb, flags, 0, 0);
if (!tun_dst)
return 0;
ip_tunnel_md_udp_encap(skb, &tun_dst->u.tun_info);
@ -330,7 +338,7 @@ static bool ipip_tunnel_ioctl_verify_protocol(u8 ipproto)
}
static int
ipip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
ipip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, int cmd)
{
if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
if (p->iph.version != 4 ||
@ -340,7 +348,8 @@ ipip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
}
p->i_key = p->o_key = 0;
p->i_flags = p->o_flags = 0;
ip_tunnel_flags_zero(p->i_flags);
ip_tunnel_flags_zero(p->o_flags);
return ip_tunnel_ctl(dev, p, cmd);
}
@ -405,8 +414,8 @@ static int ipip_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
}
static void ipip_netlink_parms(struct nlattr *data[],
struct ip_tunnel_parm *parms, bool *collect_md,
__u32 *fwmark)
struct ip_tunnel_parm_kern *parms,
bool *collect_md, __u32 *fwmark)
{
memset(parms, 0, sizeof(*parms));
@ -432,8 +441,8 @@ static int ipip_newlink(struct net *src_net, struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm p;
struct ip_tunnel_encap ipencap;
struct ip_tunnel_parm_kern p;
__u32 fwmark = 0;
if (ip_tunnel_netlink_encap_parms(data, &ipencap)) {
@ -452,8 +461,8 @@ static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm p;
struct ip_tunnel_encap ipencap;
struct ip_tunnel_parm_kern p;
bool collect_md;
__u32 fwmark = t->fwmark;
@ -510,7 +519,7 @@ static size_t ipip_get_size(const struct net_device *dev)
static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct ip_tunnel_parm *parm = &tunnel->parms;
struct ip_tunnel_parm_kern *parm = &tunnel->parms;
if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||

View File

@ -441,7 +441,7 @@ static bool ipmr_init_vif_indev(const struct net_device *dev)
static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
struct net_device *tunnel_dev, *new_dev;
struct ip_tunnel_parm p = { };
struct ip_tunnel_parm_kern p = { };
int err;
tunnel_dev = __dev_get_by_name(net, "tunl0");

View File

@ -183,7 +183,8 @@ void udp_tunnel_sock_release(struct socket *sock)
EXPORT_SYMBOL_GPL(udp_tunnel_sock_release);
struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
__be16 flags, __be64 tunnel_id, int md_size)
const unsigned long *flags,
__be64 tunnel_id, int md_size)
{
struct metadata_dst *tun_dst;
struct ip_tunnel_info *info;
@ -199,7 +200,7 @@ struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
info->key.tp_src = udp_hdr(skb)->source;
info->key.tp_dst = udp_hdr(skb)->dest;
if (udp_hdr(skb)->check)
info->key.tun_flags |= TUNNEL_CSUM;
__set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
return tun_dst;
}
EXPORT_SYMBOL_GPL(udp_tun_rx_dst);

View File

@ -63,6 +63,7 @@
#include <linux/string.h>
#include <linux/hash.h>
#include <net/ip_tunnels.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/snmp.h>
@ -2917,7 +2918,7 @@ put:
static int addrconf_set_sit_dstaddr(struct net *net, struct net_device *dev,
struct in6_ifreq *ireq)
{
struct ip_tunnel_parm p = { };
struct ip_tunnel_parm_kern p = { };
int err;
if (!(ipv6_addr_type(&ireq->ifr6_addr) & IPV6_ADDR_COMPATv4))

View File

@ -496,11 +496,11 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
tpi->proto);
if (tunnel) {
if (tunnel->parms.collect_md) {
IP_TUNNEL_DECLARE_FLAGS(flags);
struct metadata_dst *tun_dst;
__be64 tun_id;
__be16 flags;
flags = tpi->flags;
ip_tunnel_flags_copy(flags, tpi->flags);
tun_id = key32_to_tunnel_id(tpi->key);
tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
@ -548,14 +548,14 @@ static int ip6erspan_rcv(struct sk_buff *skb,
if (tunnel->parms.collect_md) {
struct erspan_metadata *pkt_md, *md;
IP_TUNNEL_DECLARE_FLAGS(flags);
struct metadata_dst *tun_dst;
struct ip_tunnel_info *info;
unsigned char *gh;
__be64 tun_id;
__be16 flags;
tpi->flags |= TUNNEL_KEY;
flags = tpi->flags;
__set_bit(IP_TUNNEL_KEY_BIT, tpi->flags);
ip_tunnel_flags_copy(flags, tpi->flags);
tun_id = key32_to_tunnel_id(tpi->key);
tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
@ -577,7 +577,8 @@ static int ip6erspan_rcv(struct sk_buff *skb,
md2 = &md->u.md2;
memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
ERSPAN_V2_MDSIZE);
info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
info->key.tun_flags);
info->options_len = sizeof(*md);
ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
@ -745,8 +746,8 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
__u32 *pmtu, __be16 proto)
{
struct ip6_tnl *tunnel = netdev_priv(dev);
IP_TUNNEL_DECLARE_FLAGS(flags);
__be16 protocol;
__be16 flags;
if (dev->type == ARPHRD_ETHER)
IPCB(skb)->flags = 0;
@ -778,8 +779,11 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
fl6->fl6_gre_key = tunnel_id_to_key32(key->tun_id);
dsfield = key->tos;
flags = key->tun_flags &
(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
ip_tunnel_flags_zero(flags);
__set_bit(IP_TUNNEL_CSUM_BIT, flags);
__set_bit(IP_TUNNEL_KEY_BIT, flags);
__set_bit(IP_TUNNEL_SEQ_BIT, flags);
ip_tunnel_flags_and(flags, flags, key->tun_flags);
tun_hlen = gre_calc_hlen(flags);
if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen))
@ -788,19 +792,21 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
gre_build_header(skb, tun_hlen,
flags, protocol,
tunnel_id_to_key32(tun_info->key.tun_id),
(flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
: 0);
test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
htonl(atomic_fetch_inc(&tunnel->o_seqno)) :
0);
} else {
if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
return -ENOMEM;
flags = tunnel->parms.o_flags;
ip_tunnel_flags_copy(flags, tunnel->parms.o_flags);
gre_build_header(skb, tunnel->tun_hlen, flags,
protocol, tunnel->parms.o_key,
(flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
: 0);
test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
htonl(atomic_fetch_inc(&tunnel->o_seqno)) :
0);
}
return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
@ -822,7 +828,8 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
&dsfield, &encap_limit);
err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
err = gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
t->parms.o_flags));
if (err)
return -1;
@ -856,7 +863,8 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
return -1;
if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
t->parms.o_flags)))
return -1;
err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
@ -883,7 +891,8 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
prepare_ip6gre_xmit_other(skb, dev, &fl6, &dsfield, &encap_limit))
return -1;
err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
err = gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
t->parms.o_flags));
if (err)
return err;
err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, skb->protocol);
@ -936,6 +945,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
struct ip_tunnel_info *tun_info = NULL;
struct ip6_tnl *t = netdev_priv(dev);
struct dst_entry *dst = skb_dst(skb);
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
bool truncate = false;
int encap_limit = -1;
__u8 dsfield = false;
@ -979,7 +989,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
goto tx_err;
t->parms.o_flags &= ~TUNNEL_KEY;
__clear_bit(IP_TUNNEL_KEY_BIT, t->parms.o_flags);
IPCB(skb)->flags = 0;
/* For collect_md mode, derive fl6 from the tunnel key,
@ -1004,7 +1014,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
fl6.fl6_gre_key = tunnel_id_to_key32(key->tun_id);
dsfield = key->tos;
if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
if (!test_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
tun_info->key.tun_flags))
goto tx_err;
if (tun_info->options_len < sizeof(*md))
goto tx_err;
@ -1065,7 +1076,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
}
/* Push GRE header. */
gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno)));
__set_bit(IP_TUNNEL_SEQ_BIT, flags);
gre_build_header(skb, 8, flags, proto, 0,
htonl(atomic_fetch_inc(&t->o_seqno)));
/* TooBig packet may have updated dst->dev's mtu */
if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
@ -1208,8 +1221,8 @@ static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
t->parms.proto = p->proto;
t->parms.i_key = p->i_key;
t->parms.o_key = p->o_key;
t->parms.i_flags = p->i_flags;
t->parms.o_flags = p->o_flags;
ip_tunnel_flags_copy(t->parms.i_flags, p->i_flags);
ip_tunnel_flags_copy(t->parms.o_flags, p->o_flags);
t->parms.fwmark = p->fwmark;
t->parms.erspan_ver = p->erspan_ver;
t->parms.index = p->index;
@ -1238,8 +1251,8 @@ static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
p->link = u->link;
p->i_key = u->i_key;
p->o_key = u->o_key;
p->i_flags = gre_flags_to_tnl_flags(u->i_flags);
p->o_flags = gre_flags_to_tnl_flags(u->o_flags);
gre_flags_to_tnl_flags(p->i_flags, u->i_flags);
gre_flags_to_tnl_flags(p->o_flags, u->o_flags);
memcpy(p->name, u->name, sizeof(u->name));
}
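
gre_flags_to_tnl_flags() now fills a caller-provided bitmap instead of returning a __be16, which is why both call sites above pass the destination first. A host-order model of the wire-flag mapping (GRE_CSUM/GRE_KEY/GRE_SEQ are the standard GRE header bits; the tunnel-flag positions are illustrative):

  /* build: cc -Wall -o greflags_model greflags_model.c */
  #include <stdint.h>
  #include <stdio.h>

  #define GRE_CSUM 0x8000u  /* wire values in host order for the model; */
  #define GRE_KEY  0x2000u  /* the kernel constants are __be16          */
  #define GRE_SEQ  0x1000u

  enum { CSUM_BIT, KEY_BIT, SEQ_BIT };  /* illustrative tunnel-flag bits */

  /* gre_flags_to_tnl_flags(), modeled: writes into a destination word
   * instead of returning a value as the old helper did. */
  static void gre_flags_to_tnl(uint64_t *dst, uint16_t wire)
  {
          *dst = 0;
          if (wire & GRE_CSUM)
                  *dst |= 1ULL << CSUM_BIT;
          if (wire & GRE_KEY)
                  *dst |= 1ULL << KEY_BIT;
          if (wire & GRE_SEQ)
                  *dst |= 1ULL << SEQ_BIT;
  }

  int main(void)
  {
          uint64_t f;

          gre_flags_to_tnl(&f, GRE_KEY | GRE_SEQ);
          printf("flags=%#llx\n", (unsigned long long)f);
          return 0;
  }
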
@ -1391,7 +1404,7 @@ static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
ipv6h->daddr = t->parms.raddr;
p = (__be16 *)(ipv6h + 1);
p[0] = t->parms.o_flags;
p[0] = ip_tunnel_flags_to_be16(t->parms.o_flags);
p[1] = htons(type);
/*
@ -1455,19 +1468,17 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
static void ip6gre_tnl_init_features(struct net_device *dev)
{
struct ip6_tnl *nt = netdev_priv(dev);
__be16 flags;
dev->features |= GRE6_FEATURES | NETIF_F_LLTX;
dev->hw_features |= GRE6_FEATURES;
flags = nt->parms.o_flags;
/* TCP offload with GRE SEQ is not supported, nor can we support 2
* levels of outer headers requiring an update.
*/
if (flags & TUNNEL_SEQ)
if (test_bit(IP_TUNNEL_SEQ_BIT, nt->parms.o_flags))
return;
if (flags & TUNNEL_CSUM && nt->encap.type != TUNNEL_ENCAP_NONE)
if (test_bit(IP_TUNNEL_CSUM_BIT, nt->parms.o_flags) &&
nt->encap.type != TUNNEL_ENCAP_NONE)
return;
dev->features |= NETIF_F_GSO_SOFTWARE;
@ -1792,12 +1803,12 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
if (data[IFLA_GRE_IFLAGS])
parms->i_flags = gre_flags_to_tnl_flags(
nla_get_be16(data[IFLA_GRE_IFLAGS]));
gre_flags_to_tnl_flags(parms->i_flags,
nla_get_be16(data[IFLA_GRE_IFLAGS]));
if (data[IFLA_GRE_OFLAGS])
parms->o_flags = gre_flags_to_tnl_flags(
nla_get_be16(data[IFLA_GRE_OFLAGS]));
gre_flags_to_tnl_flags(parms->o_flags,
nla_get_be16(data[IFLA_GRE_OFLAGS]));
if (data[IFLA_GRE_IKEY])
parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
@ -2144,11 +2155,13 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
struct __ip6_tnl_parm *p = &t->parms;
__be16 o_flags = p->o_flags;
IP_TUNNEL_DECLARE_FLAGS(o_flags);
ip_tunnel_flags_copy(o_flags, p->o_flags);
if (p->erspan_ver == 1 || p->erspan_ver == 2) {
if (!p->collect_md)
o_flags |= TUNNEL_KEY;
__set_bit(IP_TUNNEL_KEY_BIT, o_flags);
if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
goto nla_put_failure;

View File

@ -798,17 +798,15 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
const struct ipv6hdr *ipv6h;
int nh, err;
if ((!(tpi->flags & TUNNEL_CSUM) &&
(tunnel->parms.i_flags & TUNNEL_CSUM)) ||
((tpi->flags & TUNNEL_CSUM) &&
!(tunnel->parms.i_flags & TUNNEL_CSUM))) {
if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.i_flags) !=
test_bit(IP_TUNNEL_CSUM_BIT, tpi->flags)) {
DEV_STATS_INC(tunnel->dev, rx_crc_errors);
DEV_STATS_INC(tunnel->dev, rx_errors);
goto drop;
}
if (tunnel->parms.i_flags & TUNNEL_SEQ) {
if (!(tpi->flags & TUNNEL_SEQ) ||
if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.i_flags)) {
if (!test_bit(IP_TUNNEL_SEQ_BIT, tpi->flags) ||
(tunnel->i_seqno &&
(s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
DEV_STATS_INC(tunnel->dev, rx_fifo_errors);
@ -946,7 +944,9 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
if (iptunnel_pull_header(skb, 0, tpi->proto, false))
goto drop;
if (t->parms.collect_md) {
tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
tun_dst = ipv6_tun_rx_dst(skb, flags, 0, 0);
if (!tun_dst)
goto drop;
}

View File

@ -132,8 +132,8 @@ static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
return NULL;
}
static struct ip_tunnel __rcu **__ipip6_bucket(struct sit_net *sitn,
struct ip_tunnel_parm *parms)
static struct ip_tunnel __rcu **
__ipip6_bucket(struct sit_net *sitn, struct ip_tunnel_parm_kern *parms)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
@ -207,7 +207,7 @@ static int ipip6_tunnel_create(struct net_device *dev)
__dev_addr_set(dev, &t->parms.iph.saddr, 4);
memcpy(dev->broadcast, &t->parms.iph.daddr, 4);
if ((__force u16)t->parms.i_flags & SIT_ISATAP)
if (test_bit(IP_TUNNEL_SIT_ISATAP_BIT, t->parms.i_flags))
dev->priv_flags |= IFF_ISATAP;
dev->rtnl_link_ops = &sit_link_ops;
@ -226,7 +226,8 @@ out:
}
static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
struct ip_tunnel_parm *parms, int create)
struct ip_tunnel_parm_kern *parms,
int create)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
@ -1135,7 +1136,8 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
dev->needed_headroom = t_hlen + hlen;
}
static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
static void ipip6_tunnel_update(struct ip_tunnel *t,
struct ip_tunnel_parm_kern *p,
__u32 fwmark)
{
struct net *net = t->net;
@ -1196,11 +1198,11 @@ static int
ipip6_tunnel_get6rd(struct net_device *dev, struct ip_tunnel_parm __user *data)
{
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm_kern p;
struct ip_tunnel_6rd ip6rd;
struct ip_tunnel_parm p;
if (dev == dev_to_sit_net(dev)->fb_tunnel_dev) {
if (copy_from_user(&p, data, sizeof(p)))
if (!ip_tunnel_parm_from_user(&p, data))
return -EFAULT;
t = ipip6_tunnel_locate(t->net, &p, 0);
}
@ -1251,7 +1253,7 @@ static bool ipip6_valid_ip_proto(u8 ipproto)
}
static int
__ipip6_tunnel_ioctl_validate(struct net *net, struct ip_tunnel_parm *p)
__ipip6_tunnel_ioctl_validate(struct net *net, struct ip_tunnel_parm_kern *p)
{
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
@ -1268,7 +1270,7 @@ __ipip6_tunnel_ioctl_validate(struct net *net, struct ip_tunnel_parm *p)
}
static int
ipip6_tunnel_get(struct net_device *dev, struct ip_tunnel_parm *p)
ipip6_tunnel_get(struct net_device *dev, struct ip_tunnel_parm_kern *p)
{
struct ip_tunnel *t = netdev_priv(dev);
@ -1281,7 +1283,7 @@ ipip6_tunnel_get(struct net_device *dev, struct ip_tunnel_parm *p)
}
static int
ipip6_tunnel_add(struct net_device *dev, struct ip_tunnel_parm *p)
ipip6_tunnel_add(struct net_device *dev, struct ip_tunnel_parm_kern *p)
{
struct ip_tunnel *t = netdev_priv(dev);
int err;
@ -1297,7 +1299,7 @@ ipip6_tunnel_add(struct net_device *dev, struct ip_tunnel_parm *p)
}
static int
ipip6_tunnel_change(struct net_device *dev, struct ip_tunnel_parm *p)
ipip6_tunnel_change(struct net_device *dev, struct ip_tunnel_parm_kern *p)
{
struct ip_tunnel *t = netdev_priv(dev);
int err;
@ -1328,7 +1330,7 @@ ipip6_tunnel_change(struct net_device *dev, struct ip_tunnel_parm *p)
}
static int
ipip6_tunnel_del(struct net_device *dev, struct ip_tunnel_parm *p)
ipip6_tunnel_del(struct net_device *dev, struct ip_tunnel_parm_kern *p)
{
struct ip_tunnel *t = netdev_priv(dev);
@ -1348,7 +1350,8 @@ ipip6_tunnel_del(struct net_device *dev, struct ip_tunnel_parm *p)
}
static int
ipip6_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
ipip6_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p,
int cmd)
{
switch (cmd) {
case SIOCGETTUNNEL:
@ -1490,7 +1493,7 @@ static int ipip6_validate(struct nlattr *tb[], struct nlattr *data[],
}
static void ipip6_netlink_parms(struct nlattr *data[],
struct ip_tunnel_parm *parms,
struct ip_tunnel_parm_kern *parms,
__u32 *fwmark)
{
memset(parms, 0, sizeof(*parms));
@ -1599,8 +1602,8 @@ static int ipip6_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm p;
struct ip_tunnel_encap ipencap;
struct ip_tunnel_parm_kern p;
struct net *net = t->net;
struct sit_net *sitn = net_generic(net, sit_net_id);
#ifdef CONFIG_IPV6_SIT_6RD
@ -1687,7 +1690,7 @@ static size_t ipip6_get_size(const struct net_device *dev)
static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct ip_tunnel_parm *parm = &tunnel->parms;
struct ip_tunnel_parm_kern *parm = &tunnel->parms;
if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) ||
@ -1697,7 +1700,8 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
!!(parm->iph.frag_off & htons(IP_DF))) ||
nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->iph.protocol) ||
nla_put_be16(skb, IFLA_IPTUN_FLAGS, parm->i_flags) ||
nla_put_be16(skb, IFLA_IPTUN_FLAGS,
ip_tunnel_flags_to_be16(parm->i_flags)) ||
nla_put_u32(skb, IFLA_IPTUN_FWMARK, tunnel->fwmark))
goto nla_put_failure;

View File

@ -1550,6 +1550,7 @@ static int ipvs_gre_decap(struct netns_ipvs *ipvs, struct sk_buff *skb,
if (!dest)
goto unk;
if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
IP_TUNNEL_DECLARE_FLAGS(flags);
__be16 type;
/* Only support version 0 and C (csum) */
@ -1560,7 +1561,10 @@ static int ipvs_gre_decap(struct netns_ipvs *ipvs, struct sk_buff *skb,
if (type != htons(ETH_P_IP))
goto unk;
*proto = IPPROTO_IPIP;
return gre_calc_hlen(gre_flags_to_tnl_flags(greh->flags));
gre_flags_to_tnl_flags(flags, greh->flags);
return gre_calc_hlen(flags);
}
unk:

View File

@ -390,10 +390,10 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
skb->ip_summed == CHECKSUM_PARTIAL)
mtu -= GUE_PLEN_REMCSUM + GUE_LEN_PRIV;
} else if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
__be16 tflags = 0;
IP_TUNNEL_DECLARE_FLAGS(tflags) = { };
if (dest->tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
tflags |= TUNNEL_CSUM;
__set_bit(IP_TUNNEL_CSUM_BIT, tflags);
mtu -= gre_calc_hlen(tflags);
}
if (mtu < 68) {
@ -553,10 +553,10 @@ __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
skb->ip_summed == CHECKSUM_PARTIAL)
mtu -= GUE_PLEN_REMCSUM + GUE_LEN_PRIV;
} else if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
__be16 tflags = 0;
IP_TUNNEL_DECLARE_FLAGS(tflags) = { };
if (dest->tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
tflags |= TUNNEL_CSUM;
__set_bit(IP_TUNNEL_CSUM_BIT, tflags);
mtu -= gre_calc_hlen(tflags);
}
if (mtu < IPV6_MIN_MTU) {
@ -1082,11 +1082,11 @@ ipvs_gre_encap(struct net *net, struct sk_buff *skb,
{
__be16 proto = *next_protocol == IPPROTO_IPIP ?
htons(ETH_P_IP) : htons(ETH_P_IPV6);
__be16 tflags = 0;
IP_TUNNEL_DECLARE_FLAGS(tflags) = { };
size_t hdrlen;
if (cp->dest->tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
tflags |= TUNNEL_CSUM;
__set_bit(IP_TUNNEL_CSUM_BIT, tflags);
hdrlen = gre_calc_hlen(tflags);
gre_build_header(skb, hdrlen, tflags, proto, 0, 0);
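
gre_calc_hlen() itself is unchanged arithmetic, only its argument type moved to a bitmap: a GRE header is 4 bytes, plus 4 bytes for each of the optional checksum, key and sequence-number fields. A model (per RFC 2890 field sizes):

  /* build: cc -Wall -o grehlen_model grehlen_model.c */
  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  /* gre_calc_hlen(), modeled: base header plus one 4-byte field per flag */
  static size_t gre_hlen(bool csum, bool key, bool seq)
  {
          return 4 + 4 * (csum + key + seq);
  }

  int main(void)
  {
          /* the IPVS path above sets only the checksum flag */
          printf("hlen=%zu\n", gre_hlen(true, false, false)); /* 8 */
          return 0;
  }
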
@ -1165,11 +1165,11 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
max_headroom += sizeof(struct udphdr) + gue_hdrlen;
} else if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
IP_TUNNEL_DECLARE_FLAGS(tflags) = { };
size_t gre_hdrlen;
__be16 tflags = 0;
if (tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
tflags |= TUNNEL_CSUM;
__set_bit(IP_TUNNEL_CSUM_BIT, tflags);
gre_hdrlen = gre_calc_hlen(tflags);
max_headroom += gre_hdrlen;
@ -1310,11 +1310,11 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
max_headroom += sizeof(struct udphdr) + gue_hdrlen;
} else if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GRE) {
IP_TUNNEL_DECLARE_FLAGS(tflags) = { };
size_t gre_hdrlen;
__be16 tflags = 0;
if (tun_flags & IP_VS_TUNNEL_ENCAP_FLAG_CSUM)
tflags |= TUNNEL_CSUM;
__set_bit(IP_TUNNEL_CSUM_BIT, tflags);
gre_hdrlen = gre_calc_hlen(tflags);
max_headroom += gre_hdrlen;

View File

@ -174,8 +174,8 @@ struct nft_tunnel_opts {
struct erspan_metadata erspan;
u8 data[IP_TUNNEL_OPTS_MAX];
} u;
IP_TUNNEL_DECLARE_FLAGS(flags);
u32 len;
__be16 flags;
};
struct nft_tunnel_obj {
@ -271,7 +271,8 @@ static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));
opts->len = sizeof(struct vxlan_metadata);
opts->flags = TUNNEL_VXLAN_OPT;
ip_tunnel_flags_zero(opts->flags);
__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, opts->flags);
return 0;
}
@ -325,7 +326,8 @@ static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
opts->u.erspan.version = version;
opts->len = sizeof(struct erspan_metadata);
opts->flags = TUNNEL_ERSPAN_OPT;
ip_tunnel_flags_zero(opts->flags);
__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, opts->flags);
return 0;
}
@ -366,7 +368,8 @@ static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
opt->length = data_len / 4;
opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
opts->flags = TUNNEL_GENEVE_OPT;
ip_tunnel_flags_zero(opts->flags);
__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, opts->flags);
return 0;
}
@ -385,8 +388,8 @@ static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
struct nft_tunnel_opts *opts)
{
struct nlattr *nla;
__be16 type = 0;
int err, rem;
u32 type = 0;
err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
nft_tunnel_opts_policy, NULL);
@ -401,7 +404,7 @@ static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
err = nft_tunnel_obj_vxlan_init(nla, opts);
if (err)
return err;
type = TUNNEL_VXLAN_OPT;
type = IP_TUNNEL_VXLAN_OPT_BIT;
break;
case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
if (type)
@ -409,15 +412,15 @@ static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
err = nft_tunnel_obj_erspan_init(nla, opts);
if (err)
return err;
type = TUNNEL_ERSPAN_OPT;
type = IP_TUNNEL_ERSPAN_OPT_BIT;
break;
case NFTA_TUNNEL_KEY_OPTS_GENEVE:
if (type && type != TUNNEL_GENEVE_OPT)
if (type && type != IP_TUNNEL_GENEVE_OPT_BIT)
return -EINVAL;
err = nft_tunnel_obj_geneve_init(nla, opts);
if (err)
return err;
type = TUNNEL_GENEVE_OPT;
type = IP_TUNNEL_GENEVE_OPT_BIT;
break;
default:
return -EOPNOTSUPP;
@ -454,7 +457,9 @@ static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
memset(&info, 0, sizeof(info));
info.mode = IP_TUNNEL_INFO_TX;
info.key.tun_id = key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
info.key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
__set_bit(IP_TUNNEL_KEY_BIT, info.key.tun_flags);
__set_bit(IP_TUNNEL_CSUM_BIT, info.key.tun_flags);
__set_bit(IP_TUNNEL_NOCACHE_BIT, info.key.tun_flags);
if (tb[NFTA_TUNNEL_KEY_IP]) {
err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
@ -483,11 +488,12 @@ static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
return -EOPNOTSUPP;
if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
info.key.tun_flags &= ~TUNNEL_CSUM;
__clear_bit(IP_TUNNEL_CSUM_BIT, info.key.tun_flags);
if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
__set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT,
info.key.tun_flags);
if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
info.key.tun_flags |= TUNNEL_SEQ;
__set_bit(IP_TUNNEL_SEQ_BIT, info.key.tun_flags);
}
if (tb[NFTA_TUNNEL_KEY_TOS])
info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
@ -583,7 +589,7 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb,
if (!nest)
return -1;
if (opts->flags & TUNNEL_VXLAN_OPT) {
if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, opts->flags)) {
inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
if (!inner)
goto failure;
@ -591,7 +597,7 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb,
htonl(opts->u.vxlan.gbp)))
goto inner_failure;
nla_nest_end(skb, inner);
} else if (opts->flags & TUNNEL_ERSPAN_OPT) {
} else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, opts->flags)) {
inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
if (!inner)
goto failure;
@ -613,7 +619,7 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb,
break;
}
nla_nest_end(skb, inner);
} else if (opts->flags & TUNNEL_GENEVE_OPT) {
} else if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, opts->flags)) {
struct geneve_opt *opt;
int offset = 0;
@ -658,11 +664,11 @@ static int nft_tunnel_flags_dump(struct sk_buff *skb,
{
u32 flags = 0;
if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, info->key.tun_flags))
flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
if (!(info->key.tun_flags & TUNNEL_CSUM))
if (!test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags))
flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
if (info->key.tun_flags & TUNNEL_SEQ)
if (test_bit(IP_TUNNEL_SEQ_BIT, info->key.tun_flags))
flags |= NFT_TUNNEL_F_SEQ_NUMBER;
if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)

View File

@ -152,6 +152,13 @@ static void update_range(struct sw_flow_match *match,
sizeof((match)->key->field)); \
} while (0)
#define SW_FLOW_KEY_BITMAP_COPY(match, field, value_p, nbits, is_mask) ({ \
update_range(match, offsetof(struct sw_flow_key, field), \
bitmap_size(nbits), is_mask); \
bitmap_copy(is_mask ? (match)->mask->key.field : (match)->key->field, \
value_p, nbits); \
})
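
The new macro is the bitmap counterpart of SW_FLOW_KEY_PUT: it records the byte range the field occupies in the flow key, then copies nbits into either the key or the mask side. The two primitives it leans on reduce to word-granular size and copy operations, roughly as below (kernel-like names, simplified bodies):

  /* build: cc -Wall -o swkey_model swkey_model.c */
  #include <stdio.h>
  #include <string.h>

  #define BITS_PER_LONG    (8 * sizeof(unsigned long))
  #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

  /* size in bytes of the storage behind an n-bit bitmap */
  static size_t bitmap_size_(size_t nbits)
  {
          return BITS_TO_LONGS(nbits) * sizeof(unsigned long);
  }

  /* word-granular copy, as bitmap_copy() does for whole words */
  static void bitmap_copy_(unsigned long *dst, const unsigned long *src,
                           size_t nbits)
  {
          memcpy(dst, src, bitmap_size_(nbits));
  }

  int main(void)
  {
          unsigned long src[BITS_TO_LONGS(64)] = { 0x2aUL };
          unsigned long dst[BITS_TO_LONGS(64)];

          bitmap_copy_(dst, src, 64);
          printf("size=%zu dst0=%#lx\n", bitmap_size_(64), dst[0]);
          return 0;
  }
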
static bool match_validate(const struct sw_flow_match *match,
u64 key_attrs, u64 mask_attrs, bool log)
{
@ -670,8 +677,8 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
bool log)
{
bool ttl = false, ipv4 = false, ipv6 = false;
IP_TUNNEL_DECLARE_FLAGS(tun_flags) = { };
bool info_bridge_mode = false;
__be16 tun_flags = 0;
int opts_type = 0;
struct nlattr *a;
int rem;
@ -697,7 +704,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
case OVS_TUNNEL_KEY_ATTR_ID:
SW_FLOW_KEY_PUT(match, tun_key.tun_id,
nla_get_be64(a), is_mask);
tun_flags |= TUNNEL_KEY;
__set_bit(IP_TUNNEL_KEY_BIT, tun_flags);
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
@ -729,10 +736,10 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
ttl = true;
break;
case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
tun_flags |= TUNNEL_DONT_FRAGMENT;
__set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, tun_flags);
break;
case OVS_TUNNEL_KEY_ATTR_CSUM:
tun_flags |= TUNNEL_CSUM;
__set_bit(IP_TUNNEL_CSUM_BIT, tun_flags);
break;
case OVS_TUNNEL_KEY_ATTR_TP_SRC:
SW_FLOW_KEY_PUT(match, tun_key.tp_src,
@ -743,7 +750,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
nla_get_be16(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_OAM:
tun_flags |= TUNNEL_OAM;
__set_bit(IP_TUNNEL_OAM_BIT, tun_flags);
break;
case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
if (opts_type) {
@ -755,7 +762,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
if (err)
return err;
tun_flags |= TUNNEL_GENEVE_OPT;
__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_flags);
opts_type = type;
break;
case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
@ -768,7 +775,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
if (err)
return err;
tun_flags |= TUNNEL_VXLAN_OPT;
__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_flags);
opts_type = type;
break;
case OVS_TUNNEL_KEY_ATTR_PAD:
@ -784,7 +791,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
if (err)
return err;
tun_flags |= TUNNEL_ERSPAN_OPT;
__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_flags);
opts_type = type;
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE:
@ -798,7 +805,8 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
}
}
SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
SW_FLOW_KEY_BITMAP_COPY(match, tun_key.tun_flags, tun_flags,
__IP_TUNNEL_FLAG_NUM, is_mask);
if (is_mask)
SW_FLOW_KEY_MEMSET_FIELD(match, tun_proto, 0xff, true);
else
@ -823,13 +831,15 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
}
if (ipv4) {
if (info_bridge_mode) {
__clear_bit(IP_TUNNEL_KEY_BIT, tun_flags);
if (match->key->tun_key.u.ipv4.src ||
match->key->tun_key.u.ipv4.dst ||
match->key->tun_key.tp_src ||
match->key->tun_key.tp_dst ||
match->key->tun_key.ttl ||
match->key->tun_key.tos ||
tun_flags & ~TUNNEL_KEY) {
!ip_tunnel_flags_empty(tun_flags)) {
OVS_NLERR(log, "IPv4 tun info is not correct");
return -EINVAL;
}
@ -874,7 +884,7 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb,
const void *tun_opts, int swkey_tun_opts_len,
unsigned short tun_proto, u8 mode)
{
if (output->tun_flags & TUNNEL_KEY &&
if (test_bit(IP_TUNNEL_KEY_BIT, output->tun_flags) &&
nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id,
OVS_TUNNEL_KEY_ATTR_PAD))
return -EMSGSIZE;
@ -910,10 +920,10 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb,
return -EMSGSIZE;
if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
return -EMSGSIZE;
if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, output->tun_flags) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
return -EMSGSIZE;
if ((output->tun_flags & TUNNEL_CSUM) &&
if (test_bit(IP_TUNNEL_CSUM_BIT, output->tun_flags) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
return -EMSGSIZE;
if (output->tp_src &&
@ -922,18 +932,20 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb,
if (output->tp_dst &&
nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst))
return -EMSGSIZE;
if ((output->tun_flags & TUNNEL_OAM) &&
if (test_bit(IP_TUNNEL_OAM_BIT, output->tun_flags) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
return -EMSGSIZE;
if (swkey_tun_opts_len) {
if (output->tun_flags & TUNNEL_GENEVE_OPT &&
if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, output->tun_flags) &&
nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
swkey_tun_opts_len, tun_opts))
return -EMSGSIZE;
else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
else if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT,
output->tun_flags) &&
vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
return -EMSGSIZE;
else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
output->tun_flags) &&
nla_put(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
swkey_tun_opts_len, tun_opts))
return -EMSGSIZE;
@ -2029,7 +2041,7 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
if ((swkey->tun_proto || is_mask)) {
const void *opts = NULL;
if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
if (ip_tunnel_is_options_present(output->tun_key.tun_flags))
opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);
if (ip_tun_to_nlattr(skb, &output->tun_key, opts,
@ -2752,7 +2764,8 @@ static int validate_geneve_opts(struct sw_flow_key *key)
opts_len -= len;
}
key->tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
if (crit_opt)
__set_bit(IP_TUNNEL_CRIT_OPT_BIT, key->tun_key.tun_flags);
return 0;
}
@ -2760,6 +2773,7 @@ static int validate_geneve_opts(struct sw_flow_key *key)
static int validate_and_copy_set_tun(const struct nlattr *attr,
struct sw_flow_actions **sfa, bool log)
{
IP_TUNNEL_DECLARE_FLAGS(dst_opt_type) = { };
struct sw_flow_match match;
struct sw_flow_key key;
struct metadata_dst *tun_dst;
@ -2767,9 +2781,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
struct ovs_tunnel_info *ovs_tun;
struct nlattr *a;
int err = 0, start, opts_type;
__be16 dst_opt_type;
dst_opt_type = 0;
ovs_match_init(&match, &key, true, NULL);
opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log);
if (opts_type < 0)
@ -2781,13 +2793,14 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
err = validate_geneve_opts(&key);
if (err < 0)
return err;
dst_opt_type = TUNNEL_GENEVE_OPT;
__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, dst_opt_type);
break;
case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
dst_opt_type = TUNNEL_VXLAN_OPT;
__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, dst_opt_type);
break;
case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
dst_opt_type = TUNNEL_ERSPAN_OPT;
__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, dst_opt_type);
break;
}
}

View File

@ -221,7 +221,7 @@ static int __psample_ip_tun_to_nlattr(struct sk_buff *skb,
const struct ip_tunnel_key *tun_key = &tun_info->key;
int tun_opts_len = tun_info->options_len;
if (tun_key->tun_flags & TUNNEL_KEY &&
if (test_bit(IP_TUNNEL_KEY_BIT, tun_key->tun_flags) &&
nla_put_be64(skb, PSAMPLE_TUNNEL_KEY_ATTR_ID, tun_key->tun_id,
PSAMPLE_TUNNEL_KEY_ATTR_PAD))
return -EMSGSIZE;
@ -257,10 +257,10 @@ static int __psample_ip_tun_to_nlattr(struct sk_buff *skb,
return -EMSGSIZE;
if (nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TTL, tun_key->ttl))
return -EMSGSIZE;
if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, tun_key->tun_flags) &&
nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
return -EMSGSIZE;
if ((tun_key->tun_flags & TUNNEL_CSUM) &&
if (test_bit(IP_TUNNEL_CSUM_BIT, tun_key->tun_flags) &&
nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_CSUM))
return -EMSGSIZE;
if (tun_key->tp_src &&
@ -269,15 +269,16 @@ static int __psample_ip_tun_to_nlattr(struct sk_buff *skb,
if (tun_key->tp_dst &&
nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst))
return -EMSGSIZE;
if ((tun_key->tun_flags & TUNNEL_OAM) &&
if (test_bit(IP_TUNNEL_OAM_BIT, tun_key->tun_flags) &&
nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_OAM))
return -EMSGSIZE;
if (tun_opts_len) {
if (tun_key->tun_flags & TUNNEL_GENEVE_OPT &&
if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_key->tun_flags) &&
nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_GENEVE_OPTS,
tun_opts_len, tun_opts))
return -EMSGSIZE;
else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT &&
else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
tun_key->tun_flags) &&
nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
tun_opts_len, tun_opts))
return -EMSGSIZE;
@ -314,7 +315,7 @@ static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info)
int tun_opts_len = tun_info->options_len;
int sum = nla_total_size(0); /* PSAMPLE_ATTR_TUNNEL */
if (tun_key->tun_flags & TUNNEL_KEY)
if (test_bit(IP_TUNNEL_KEY_BIT, tun_key->tun_flags))
sum += nla_total_size_64bit(sizeof(u64));
if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE)
@ -337,20 +338,21 @@ static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info)
if (tun_key->tos)
sum += nla_total_size(sizeof(u8));
sum += nla_total_size(sizeof(u8)); /* TTL */
if (tun_key->tun_flags & TUNNEL_DONT_FRAGMENT)
if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, tun_key->tun_flags))
sum += nla_total_size(0);
if (tun_key->tun_flags & TUNNEL_CSUM)
if (test_bit(IP_TUNNEL_CSUM_BIT, tun_key->tun_flags))
sum += nla_total_size(0);
if (tun_key->tp_src)
sum += nla_total_size(sizeof(u16));
if (tun_key->tp_dst)
sum += nla_total_size(sizeof(u16));
if (tun_key->tun_flags & TUNNEL_OAM)
if (test_bit(IP_TUNNEL_OAM_BIT, tun_key->tun_flags))
sum += nla_total_size(0);
if (tun_opts_len) {
if (tun_key->tun_flags & TUNNEL_GENEVE_OPT)
if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_key->tun_flags))
sum += nla_total_size(tun_opts_len);
else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT)
else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
tun_key->tun_flags))
sum += nla_total_size(tun_opts_len);
}
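
This length precomputation is plain netlink arithmetic: nla_total_size() is the 4-byte attribute header plus the payload, rounded up to 4-byte alignment, so each flag attribute above costs 4 bytes and the option blobs round up. A model:

  /* build: cc -Wall -o nla_model nla_model.c */
  #include <stdio.h>

  struct nlattr_ { unsigned short nla_len, nla_type; };  /* 4 bytes */

  #define NLA_ALIGNTO    4
  #define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
  #define NLA_HDRLEN     ((int)NLA_ALIGN(sizeof(struct nlattr_)))

  static int nla_total_size_(int payload)
  {
          return NLA_ALIGN(NLA_HDRLEN + payload);
  }

  int main(void)
  {
          /* flag attr, u8 attr, and a 6-byte option blob */
          printf("%d %d %d\n", nla_total_size_(0), nla_total_size_(1),
                 nla_total_size_(6));   /* 4 8 12 */
          return 0;
  }
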

View File

@ -230,7 +230,7 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
nla_for_each_attr(attr, head, len, rem) {
switch (nla_type(attr)) {
case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
if (type && type != TUNNEL_GENEVE_OPT) {
if (type && type != IP_TUNNEL_GENEVE_OPT_BIT) {
NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
return -EINVAL;
}
@ -247,7 +247,7 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
dst_len -= opt_len;
dst += opt_len;
}
type = TUNNEL_GENEVE_OPT;
type = IP_TUNNEL_GENEVE_OPT_BIT;
break;
case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
if (type) {
@ -259,7 +259,7 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
if (opt_len < 0)
return opt_len;
opts_len += opt_len;
type = TUNNEL_VXLAN_OPT;
type = IP_TUNNEL_VXLAN_OPT_BIT;
break;
case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
if (type) {
@ -271,7 +271,7 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
if (opt_len < 0)
return opt_len;
opts_len += opt_len;
type = TUNNEL_ERSPAN_OPT;
type = IP_TUNNEL_ERSPAN_OPT_BIT;
break;
}
}
@ -302,7 +302,7 @@ static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
switch (nla_type(nla_data(nla))) {
case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
#if IS_ENABLED(CONFIG_INET)
info->key.tun_flags |= TUNNEL_GENEVE_OPT;
__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags);
return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
opts_len, extack);
#else
@ -310,7 +310,7 @@ static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
#endif
case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
#if IS_ENABLED(CONFIG_INET)
info->key.tun_flags |= TUNNEL_VXLAN_OPT;
__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags);
return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
opts_len, extack);
#else
@ -318,7 +318,7 @@ static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
#endif
case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
#if IS_ENABLED(CONFIG_INET)
info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, info->key.tun_flags);
return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
opts_len, extack);
#else
@ -363,6 +363,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
bool bind = act_flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
struct tcf_tunnel_key_params *params_new;
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct metadata_dst *metadata = NULL;
struct tcf_chain *goto_ch = NULL;
struct tc_tunnel_key *parm;
@ -371,7 +372,6 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
__be16 dst_port = 0;
__be64 key_id = 0;
int opts_len = 0;
__be16 flags = 0;
u8 tos, ttl;
int ret = 0;
u32 index;
@ -412,16 +412,16 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]);
key_id = key32_to_tunnel_id(key32);
flags = TUNNEL_KEY;
__set_bit(IP_TUNNEL_KEY_BIT, flags);
}
flags |= TUNNEL_CSUM;
__set_bit(IP_TUNNEL_CSUM_BIT, flags);
if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
flags &= ~TUNNEL_CSUM;
__clear_bit(IP_TUNNEL_CSUM_BIT, flags);
if (nla_get_flag(tb[TCA_TUNNEL_KEY_NO_FRAG]))
flags |= TUNNEL_DONT_FRAGMENT;
__set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, flags);
if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);
@ -663,15 +663,15 @@ static int tunnel_key_opts_dump(struct sk_buff *skb,
if (!start)
return -EMSGSIZE;
if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags)) {
err = tunnel_key_geneve_opts_dump(skb, info);
if (err)
goto err_out;
} else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
} else if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags)) {
err = tunnel_key_vxlan_opts_dump(skb, info);
if (err)
goto err_out;
} else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
} else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, info->key.tun_flags)) {
err = tunnel_key_erspan_opts_dump(skb, info);
if (err)
goto err_out;
@ -741,7 +741,7 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
struct ip_tunnel_key *key = &info->key;
__be32 key_id = tunnel_id_to_key32(key->tun_id);
if (((key->tun_flags & TUNNEL_KEY) &&
if ((test_bit(IP_TUNNEL_KEY_BIT, key->tun_flags) &&
nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) ||
tunnel_key_dump_addresses(skb,
&params->tcft_enc_metadata->u.tun_info) ||
@ -749,8 +749,8 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT,
key->tp_dst)) ||
nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
!(key->tun_flags & TUNNEL_CSUM)) ||
((key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
!test_bit(IP_TUNNEL_CSUM_BIT, key->tun_flags)) ||
(test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) &&
nla_put_flag(skb, TCA_TUNNEL_KEY_NO_FRAG)) ||
tunnel_key_opts_dump(skb, info))
goto nla_put_failure;

View File

@ -28,6 +28,7 @@
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/gtp.h>
#include <net/pfcp.h>
#include <net/tc_wrapper.h>
#include <net/dst.h>
@ -741,6 +742,7 @@ enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
[TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
[TCA_FLOWER_KEY_ENC_OPTS_GTP] = { .type = NLA_NESTED },
[TCA_FLOWER_KEY_ENC_OPTS_PFCP] = { .type = NLA_NESTED },
};
static const struct nla_policy
@ -770,6 +772,12 @@ gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI] = { .type = NLA_U8 },
};
static const struct nla_policy
pfcp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1] = {
[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID] = { .type = NLA_U64 },
};
static const struct nla_policy
mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 },
@ -1419,6 +1427,44 @@ static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
return sizeof(*sinfo);
}
static int fl_set_pfcp_opt(const struct nlattr *nla, struct fl_flow_key *key,
int depth, int option_len,
struct netlink_ext_ack *extack)
{
struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1];
struct pfcp_metadata *md;
int err;
md = (struct pfcp_metadata *)&key->enc_opts.data[key->enc_opts.len];
memset(md, 0xff, sizeof(*md));
if (!depth)
return sizeof(*md);
if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_PFCP) {
NL_SET_ERR_MSG_MOD(extack, "Non-pfcp option type for mask");
return -EINVAL;
}
err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX, nla,
pfcp_opt_policy, extack);
if (err < 0)
return err;
if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]) {
NL_SET_ERR_MSG_MOD(extack, "Missing tunnel key pfcp option type");
return -EINVAL;
}
if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE])
md->type = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]);
if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID])
md->seid = nla_get_be64(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID]);
return sizeof(*md);
}
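
The mask side of fl_set_pfcp_opt() starts from the all-ones memset() and is narrowed per attribute; the blob being filled is a small packed record of message type plus session endpoint ID. A sketch of that layout (the authoritative declaration is struct pfcp_metadata in include/net/pfcp.h):

  /* build: cc -Wall -o pfcp_md_sketch pfcp_md_sketch.c */
  #include <stdint.h>
  #include <stdio.h>

  /* Sketch of the blob fl_set_pfcp_opt() writes: one message-type octet
   * and a 64-bit big-endian SEID, packed back to back. */
  struct pfcp_metadata_sketch {
          uint8_t  type;               /* PFCP message type */
          uint64_t seid;               /* session endpoint ID (__be64) */
  } __attribute__((packed));

  int main(void)
  {
          printf("blob size=%zu\n", sizeof(struct pfcp_metadata_sketch)); /* 9 */
          return 0;
  }
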
static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
struct fl_flow_key *mask,
struct netlink_ext_ack *extack)
@ -1454,12 +1500,13 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
switch (nla_type(nla_opt_key)) {
case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
if (key->enc_opts.dst_opt_type &&
key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
key->enc_opts.dst_opt_type !=
IP_TUNNEL_GENEVE_OPT_BIT) {
NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
return -EINVAL;
}
option_len = 0;
key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
key->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT;
option_len = fl_set_geneve_opt(nla_opt_key, key,
key_depth, option_len,
extack);
@ -1470,7 +1517,7 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
/* At the same time we need to parse through the mask
* in order to verify exact and mask attribute lengths.
*/
mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
mask->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT;
option_len = fl_set_geneve_opt(nla_opt_msk, mask,
msk_depth, option_len,
extack);
@ -1489,7 +1536,7 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
return -EINVAL;
}
option_len = 0;
key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
key->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT;
option_len = fl_set_vxlan_opt(nla_opt_key, key,
key_depth, option_len,
extack);
@ -1500,7 +1547,7 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
/* At the same time we need to parse through the mask
* in order to verify exact and mask attribute lengths.
*/
mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
mask->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT;
option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
msk_depth, option_len,
extack);
@ -1519,7 +1566,7 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
return -EINVAL;
}
option_len = 0;
key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
key->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT;
option_len = fl_set_erspan_opt(nla_opt_key, key,
key_depth, option_len,
extack);
@ -1530,7 +1577,7 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
/* At the same time we need to parse through the mask
* in order to verify exact and mask attribute lengths.
*/
mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
mask->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT;
option_len = fl_set_erspan_opt(nla_opt_msk, mask,
msk_depth, option_len,
extack);
@ -1550,7 +1597,7 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
return -EINVAL;
}
option_len = 0;
key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
key->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT;
option_len = fl_set_gtp_opt(nla_opt_key, key,
key_depth, option_len,
extack);
@ -1561,7 +1608,7 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
/* At the same time we need to parse through the mask
* in order to verify exact and mask attribute lengths.
*/
mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
mask->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT;
option_len = fl_set_gtp_opt(nla_opt_msk, mask,
msk_depth, option_len,
extack);
@ -1575,6 +1622,36 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
return -EINVAL;
}
break;
case TCA_FLOWER_KEY_ENC_OPTS_PFCP:
if (key->enc_opts.dst_opt_type) {
NL_SET_ERR_MSG_MOD(extack, "Duplicate type for pfcp options");
return -EINVAL;
}
option_len = 0;
key->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT;
option_len = fl_set_pfcp_opt(nla_opt_key, key,
key_depth, option_len,
extack);
if (option_len < 0)
return option_len;
key->enc_opts.len += option_len;
/* At the same time we need to parse through the mask
* in order to verify exact and mask attribute lengths.
*/
mask->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT;
option_len = fl_set_pfcp_opt(nla_opt_msk, mask,
msk_depth, option_len,
extack);
if (option_len < 0)
return option_len;
mask->enc_opts.len += option_len;
if (key->enc_opts.len != mask->enc_opts.len) {
NL_SET_ERR_MSG_MOD(extack, "Key and mask miss aligned");
return -EINVAL;
}
break;
default:
NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
return -EINVAL;
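
A worked example of the key/mask symmetry enforced above, with hypothetical
values (the option string follows the pfcp_opts syntax from the cover
letter): a rule carrying "pfcp_opts 2:aa/ff:ffffffffffffffff" would leave the
two blobs as

	/* key side: dst_opt_type == IP_TUNNEL_PFCP_OPT_BIT */
	struct pfcp_metadata key_md = {
		.type = 2,			/* PFCP message type */
		.seid = cpu_to_be64(0xaa),	/* Session Endpoint ID */
	};

	/* mask side: must end up the same length as the key side,
	 * or fl_set_enc_opt() rejects the rule
	 */
	struct pfcp_metadata msk_md = {
		.type = 0xff,			/* exact match on type */
		.seid = cpu_to_be64(~0ULL),	/* exact match on SEID */
	};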
@ -3117,6 +3194,32 @@ nla_put_failure:
return -EMSGSIZE;
}
static int fl_dump_key_pfcp_opt(struct sk_buff *skb,
struct flow_dissector_key_enc_opts *enc_opts)
{
struct pfcp_metadata *md;
struct nlattr *nest;
nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_PFCP);
if (!nest)
goto nla_put_failure;
md = (struct pfcp_metadata *)&enc_opts->data[0];
if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE, md->type))
goto nla_put_failure;
if (nla_put_be64(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID,
md->seid, 0))
goto nla_put_failure;
nla_nest_end(skb, nest);
return 0;
nla_put_failure:
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
static int fl_dump_key_ct(struct sk_buff *skb,
struct flow_dissector_key_ct *key,
struct flow_dissector_key_ct *mask)
@ -3202,26 +3305,31 @@ static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
goto nla_put_failure;
switch (enc_opts->dst_opt_type) {
case TUNNEL_GENEVE_OPT:
case IP_TUNNEL_GENEVE_OPT_BIT:
err = fl_dump_key_geneve_opt(skb, enc_opts);
if (err)
goto nla_put_failure;
break;
case TUNNEL_VXLAN_OPT:
case IP_TUNNEL_VXLAN_OPT_BIT:
err = fl_dump_key_vxlan_opt(skb, enc_opts);
if (err)
goto nla_put_failure;
break;
case TUNNEL_ERSPAN_OPT:
case IP_TUNNEL_ERSPAN_OPT_BIT:
err = fl_dump_key_erspan_opt(skb, enc_opts);
if (err)
goto nla_put_failure;
break;
case TUNNEL_GTP_OPT:
case IP_TUNNEL_GTP_OPT_BIT:
err = fl_dump_key_gtp_opt(skb, enc_opts);
if (err)
goto nla_put_failure;
break;
case IP_TUNNEL_PFCP_OPT_BIT:
err = fl_dump_key_pfcp_opt(skb, enc_opts);
if (err)
goto nla_put_failure;
break;
default:
goto nla_put_failure;
}

View File

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _TOOLS_LINUX_ALIGN_H
#define _TOOLS_LINUX_ALIGN_H
#include <uapi/linux/const.h>
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
#endif /* _TOOLS_LINUX_ALIGN_H */
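
These helpers assume a power-of-two alignment; a few worked values as a
sanity check (not part of the header):

	ALIGN(0, 8)       == 0
	ALIGN(1, 8)       == 8
	ALIGN(9, 8)       == 16
	ALIGN_DOWN(15, 8) == 8
	IS_ALIGNED(16, 8) == true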

View File

@ -3,6 +3,7 @@
#define _TOOLS_LINUX_BITMAP_H
#include <string.h>
#include <linux/align.h>
#include <linux/bitops.h>
#include <linux/find.h>
#include <stdlib.h>
@ -25,13 +26,14 @@ bool __bitmap_intersects(const unsigned long *bitmap1,
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = 0UL;
else {
int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0, len);
memset(dst, 0, bitmap_size(nbits));
}
}
@ -83,7 +85,7 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
*/
static inline unsigned long *bitmap_zalloc(int nbits)
{
return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
return calloc(1, bitmap_size(nbits));
}
/*
@ -126,7 +128,6 @@ static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
#define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long))
#endif
#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
static inline bool bitmap_equal(const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
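
With the shared bitmap_size() in place, the byte counts follow from one piece
of arithmetic; an illustrative use on a 64-bit build (BITS_PER_LONG == 64):

	unsigned long *map = bitmap_zalloc(65);	/* calloc(1, bitmap_size(65)) */

	/* bitmap_size(65) == ALIGN(65, 64) / 8 == 128 / 8 == 16 bytes */
	bitmap_zero(map, 65);			/* memset(map, 0, 16) */
	free(map);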

View File

@ -20,6 +20,8 @@
#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_BYTE)
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
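
BYTES_TO_BITS() now sits next to its BITS_TO_* counterparts and simply scales
by BITS_PER_BYTE; a couple of worked values:

	BYTES_TO_BITS(1) == 8
	BYTES_TO_BITS(4) == 32
	BITS_TO_BYTES(9) == 2	/* DIV_ROUND_UP rounds up */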

View File

@ -2,8 +2,8 @@
#ifndef _TOOLS_LINUX_MM_H
#define _TOOLS_LINUX_MM_H
#include <linux/align.h>
#include <linux/mmzone.h>
#include <uapi/linux/const.h>
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
@ -11,9 +11,6 @@
#define PHYS_ADDR_MAX (~(phys_addr_t)0)
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
#define __va(x) ((void *)((unsigned long)(x)))

View File

@ -186,8 +186,6 @@ static_var:
return ret2;
}
#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_LONG / sizeof(long))
static int convert_variable_type(Dwarf_Die *vr_die,
struct probe_trace_arg *tvar,
const char *cast, bool user_access)
@ -217,7 +215,7 @@ static int convert_variable_type(Dwarf_Die *vr_die,
total = dwarf_bytesize(vr_die);
if (boffs < 0 || total < 0)
return -ENOENT;
ret = snprintf(buf, 16, "b%d@%d/%zd", bsize, boffs,
ret = snprintf(buf, 16, "b%d@%d/%d", bsize, boffs,
BYTES_TO_BITS(total));
goto formatted;
}
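
The format-string change falls out of the macro's type, assuming total is the
int returned by dwarf_bytesize():

	/* old local macro: int * int / sizeof(long) promotes to size_t,
	 * hence the "z" length modifier in "%zd"
	 */
	total * BITS_PER_LONG / sizeof(long)

	/* shared macro from bitops.h: int * int stays int, printed with "%d" */
	total * BITS_PER_BYTE

	/* both give the same bit count, since BITS_PER_LONG == 8 * sizeof(long) */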