2019-05-27 09:55:01 +03:00
// SPDX-License-Identifier: GPL-2.0-or-later
2016-09-13 03:13:09 +03:00
/*
* net / sched / act_skbmod . c skb data modifier
*
* Copyright ( c ) 2016 Jamal Hadi Salim < jhs @ mojatatu . com >
*/
# include <linux/module.h>
2021-07-20 02:41:24 +03:00
# include <linux/if_arp.h>
2016-09-13 03:13:09 +03:00
# include <linux/init.h>
# include <linux/kernel.h>
# include <linux/skbuff.h>
# include <linux/rtnetlink.h>
net/sched: act_skbmod: Add SKBMOD_F_ECN option support
Currently, when doing rate limiting using the tc-police(8) action, the
easiest way is to simply drop the packets which exceed or conform the
configured bandwidth limit. Add a new option to tc-skbmod(8), so that
users may use the ECN [1] extension to explicitly inform the receiver
about the congestion instead of dropping packets "on the floor".
The 2 least significant bits of the Traffic Class field in IPv4 and IPv6
headers are used to represent different ECN states [2]:
0b00: "Non ECN-Capable Transport", Non-ECT
0b10: "ECN Capable Transport", ECT(0)
0b01: "ECN Capable Transport", ECT(1)
0b11: "Congestion Encountered", CE
As an example:
$ tc filter add dev eth0 parent 1: protocol ip prio 10 \
matchall action skbmod ecn
Doing the above marks all ECT(0) and ECT(1) packets as CE. It does NOT
affect Non-ECT or non-IP packets. In the tc-police scenario mentioned
above, users may pipe a tc-police action and a tc-skbmod "ecn" action
together to achieve ECN-based rate limiting.
For TCP connections, upon receiving a CE packet, the receiver will respond
with an ECE packet, asking the sender to reduce their congestion window.
However ECN also works with other L4 protocols e.g. DCCP and SCTP [2], and
our implementation does not touch or care about L4 headers.
The updated tc-skbmod SYNOPSIS looks like the following:
tc ... action skbmod { set SETTABLE | swap SWAPPABLE | ecn } ...
Only one of "set", "swap" or "ecn" shall be used in a single tc-skbmod
command. Trying to use more than one of them at a time is considered
undefined behavior; pipe multiple tc-skbmod commands together instead.
"set" and "swap" only affect Ethernet packets, while "ecn" only affects
IPv{4,6} packets.
It is also worth mentioning that, in theory, the same effect could be
achieved by piping a "police" action and a "bpf" action using the
bpf_skb_ecn_set_ce() helper, but this requires eBPF programming from the
user, thus impractical.
Depends on patch "net/sched: act_skbmod: Skip non-Ethernet packets".
[1] https://datatracker.ietf.org/doc/html/rfc3168
[2] https://en.wikipedia.org/wiki/Explicit_Congestion_Notification
Reviewed-by: Cong Wang <cong.wang@bytedance.com>
Signed-off-by: Peilin Ye <peilin.ye@bytedance.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-07-28 04:33:15 +03:00
# include <net/inet_ecn.h>
2016-09-13 03:13:09 +03:00
# include <net/netlink.h>
# include <net/pkt_sched.h>
2019-03-20 17:00:12 +03:00
# include <net/pkt_cls.h>
2022-12-06 16:55:12 +03:00
# include <net/tc_wrapper.h>
2016-09-13 03:13:09 +03:00
# include <linux/tc_act/tc_skbmod.h>
# include <net/tc_act/tc_skbmod.h>
static struct tc_action_ops act_skbmod_ops ;
2022-12-06 16:55:12 +03:00
TC_INDIRECT_SCOPE int tcf_skbmod_act ( struct sk_buff * skb ,
const struct tc_action * a ,
struct tcf_result * res )
2016-09-13 03:13:09 +03:00
{
struct tcf_skbmod * d = to_skbmod ( a ) ;
net/sched: act_skbmod: Add SKBMOD_F_ECN option support
Currently, when doing rate limiting using the tc-police(8) action, the
easiest way is to simply drop the packets which exceed or conform the
configured bandwidth limit. Add a new option to tc-skbmod(8), so that
users may use the ECN [1] extension to explicitly inform the receiver
about the congestion instead of dropping packets "on the floor".
The 2 least significant bits of the Traffic Class field in IPv4 and IPv6
headers are used to represent different ECN states [2]:
0b00: "Non ECN-Capable Transport", Non-ECT
0b10: "ECN Capable Transport", ECT(0)
0b01: "ECN Capable Transport", ECT(1)
0b11: "Congestion Encountered", CE
As an example:
$ tc filter add dev eth0 parent 1: protocol ip prio 10 \
matchall action skbmod ecn
Doing the above marks all ECT(0) and ECT(1) packets as CE. It does NOT
affect Non-ECT or non-IP packets. In the tc-police scenario mentioned
above, users may pipe a tc-police action and a tc-skbmod "ecn" action
together to achieve ECN-based rate limiting.
For TCP connections, upon receiving a CE packet, the receiver will respond
with an ECE packet, asking the sender to reduce their congestion window.
However ECN also works with other L4 protocols e.g. DCCP and SCTP [2], and
our implementation does not touch or care about L4 headers.
The updated tc-skbmod SYNOPSIS looks like the following:
tc ... action skbmod { set SETTABLE | swap SWAPPABLE | ecn } ...
Only one of "set", "swap" or "ecn" shall be used in a single tc-skbmod
command. Trying to use more than one of them at a time is considered
undefined behavior; pipe multiple tc-skbmod commands together instead.
"set" and "swap" only affect Ethernet packets, while "ecn" only affects
IPv{4,6} packets.
It is also worth mentioning that, in theory, the same effect could be
achieved by piping a "police" action and a "bpf" action using the
bpf_skb_ecn_set_ce() helper, but this requires eBPF programming from the
user, thus impractical.
Depends on patch "net/sched: act_skbmod: Skip non-Ethernet packets".
[1] https://datatracker.ietf.org/doc/html/rfc3168
[2] https://en.wikipedia.org/wiki/Explicit_Congestion_Notification
Reviewed-by: Cong Wang <cong.wang@bytedance.com>
Signed-off-by: Peilin Ye <peilin.ye@bytedance.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-07-28 04:33:15 +03:00
int action , max_edit_len , err ;
2016-09-13 03:13:09 +03:00
struct tcf_skbmod_params * p ;
u64 flags ;
tcf_lastuse_update ( & d - > tcf_tm ) ;
2021-10-16 11:49:09 +03:00
bstats_update ( this_cpu_ptr ( d - > common . cpu_bstats ) , skb ) ;
2016-09-13 03:13:09 +03:00
2021-07-20 02:41:24 +03:00
action = READ_ONCE ( d - > tcf_action ) ;
if ( unlikely ( action = = TC_ACT_SHOT ) )
goto drop ;
net/sched: act_skbmod: Add SKBMOD_F_ECN option support
Currently, when doing rate limiting using the tc-police(8) action, the
easiest way is to simply drop the packets which exceed or conform the
configured bandwidth limit. Add a new option to tc-skbmod(8), so that
users may use the ECN [1] extension to explicitly inform the receiver
about the congestion instead of dropping packets "on the floor".
The 2 least significant bits of the Traffic Class field in IPv4 and IPv6
headers are used to represent different ECN states [2]:
0b00: "Non ECN-Capable Transport", Non-ECT
0b10: "ECN Capable Transport", ECT(0)
0b01: "ECN Capable Transport", ECT(1)
0b11: "Congestion Encountered", CE
As an example:
$ tc filter add dev eth0 parent 1: protocol ip prio 10 \
matchall action skbmod ecn
Doing the above marks all ECT(0) and ECT(1) packets as CE. It does NOT
affect Non-ECT or non-IP packets. In the tc-police scenario mentioned
above, users may pipe a tc-police action and a tc-skbmod "ecn" action
together to achieve ECN-based rate limiting.
For TCP connections, upon receiving a CE packet, the receiver will respond
with an ECE packet, asking the sender to reduce their congestion window.
However ECN also works with other L4 protocols e.g. DCCP and SCTP [2], and
our implementation does not touch or care about L4 headers.
The updated tc-skbmod SYNOPSIS looks like the following:
tc ... action skbmod { set SETTABLE | swap SWAPPABLE | ecn } ...
Only one of "set", "swap" or "ecn" shall be used in a single tc-skbmod
command. Trying to use more than one of them at a time is considered
undefined behavior; pipe multiple tc-skbmod commands together instead.
"set" and "swap" only affect Ethernet packets, while "ecn" only affects
IPv{4,6} packets.
It is also worth mentioning that, in theory, the same effect could be
achieved by piping a "police" action and a "bpf" action using the
bpf_skb_ecn_set_ce() helper, but this requires eBPF programming from the
user, thus impractical.
Depends on patch "net/sched: act_skbmod: Skip non-Ethernet packets".
[1] https://datatracker.ietf.org/doc/html/rfc3168
[2] https://en.wikipedia.org/wiki/Explicit_Congestion_Notification
Reviewed-by: Cong Wang <cong.wang@bytedance.com>
Signed-off-by: Peilin Ye <peilin.ye@bytedance.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-07-28 04:33:15 +03:00
max_edit_len = skb_mac_header_len ( skb ) ;
p = rcu_dereference_bh ( d - > skbmod_p ) ;
flags = p - > flags ;
/* tcf_skbmod_init() guarantees "flags" to be one of the following:
* 1. a combination of SKBMOD_F_ { DMAC , SMAC , ETYPE }
* 2. SKBMOD_F_SWAPMAC
* 3. SKBMOD_F_ECN
* SKBMOD_F_ECN only works with IP packets ; all other flags only work with Ethernet
* packets .
*/
if ( flags = = SKBMOD_F_ECN ) {
switch ( skb_protocol ( skb , true ) ) {
case cpu_to_be16 ( ETH_P_IP ) :
case cpu_to_be16 ( ETH_P_IPV6 ) :
max_edit_len + = skb_network_header_len ( skb ) ;
break ;
default :
goto out ;
}
} else if ( ! skb - > dev | | skb - > dev - > type ! = ARPHRD_ETHER ) {
goto out ;
}
2021-07-20 02:41:24 +03:00
net/sched: act_skbmod: Add SKBMOD_F_ECN option support
Currently, when doing rate limiting using the tc-police(8) action, the
easiest way is to simply drop the packets which exceed or conform the
configured bandwidth limit. Add a new option to tc-skbmod(8), so that
users may use the ECN [1] extension to explicitly inform the receiver
about the congestion instead of dropping packets "on the floor".
The 2 least significant bits of the Traffic Class field in IPv4 and IPv6
headers are used to represent different ECN states [2]:
0b00: "Non ECN-Capable Transport", Non-ECT
0b10: "ECN Capable Transport", ECT(0)
0b01: "ECN Capable Transport", ECT(1)
0b11: "Congestion Encountered", CE
As an example:
$ tc filter add dev eth0 parent 1: protocol ip prio 10 \
matchall action skbmod ecn
Doing the above marks all ECT(0) and ECT(1) packets as CE. It does NOT
affect Non-ECT or non-IP packets. In the tc-police scenario mentioned
above, users may pipe a tc-police action and a tc-skbmod "ecn" action
together to achieve ECN-based rate limiting.
For TCP connections, upon receiving a CE packet, the receiver will respond
with an ECE packet, asking the sender to reduce their congestion window.
However ECN also works with other L4 protocols e.g. DCCP and SCTP [2], and
our implementation does not touch or care about L4 headers.
The updated tc-skbmod SYNOPSIS looks like the following:
tc ... action skbmod { set SETTABLE | swap SWAPPABLE | ecn } ...
Only one of "set", "swap" or "ecn" shall be used in a single tc-skbmod
command. Trying to use more than one of them at a time is considered
undefined behavior; pipe multiple tc-skbmod commands together instead.
"set" and "swap" only affect Ethernet packets, while "ecn" only affects
IPv{4,6} packets.
It is also worth mentioning that, in theory, the same effect could be
achieved by piping a "police" action and a "bpf" action using the
bpf_skb_ecn_set_ce() helper, but this requires eBPF programming from the
user, thus impractical.
Depends on patch "net/sched: act_skbmod: Skip non-Ethernet packets".
[1] https://datatracker.ietf.org/doc/html/rfc3168
[2] https://en.wikipedia.org/wiki/Explicit_Congestion_Notification
Reviewed-by: Cong Wang <cong.wang@bytedance.com>
Signed-off-by: Peilin Ye <peilin.ye@bytedance.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-07-28 04:33:15 +03:00
err = skb_ensure_writable ( skb , max_edit_len ) ;
2018-07-30 15:30:43 +03:00
if ( unlikely ( err ) ) /* best policy is to drop on the floor */
goto drop ;
2016-09-13 03:13:09 +03:00
if ( flags & SKBMOD_F_DMAC )
ether_addr_copy ( eth_hdr ( skb ) - > h_dest , p - > eth_dst ) ;
if ( flags & SKBMOD_F_SMAC )
ether_addr_copy ( eth_hdr ( skb ) - > h_source , p - > eth_src ) ;
if ( flags & SKBMOD_F_ETYPE )
eth_hdr ( skb ) - > h_proto = p - > eth_type ;
if ( flags & SKBMOD_F_SWAPMAC ) {
u16 tmpaddr [ ETH_ALEN / 2 ] ; /* ether_addr_copy() requirement */
/*XXX: I am sure we can come up with more efficient swapping*/
ether_addr_copy ( ( u8 * ) tmpaddr , eth_hdr ( skb ) - > h_dest ) ;
ether_addr_copy ( eth_hdr ( skb ) - > h_dest , eth_hdr ( skb ) - > h_source ) ;
ether_addr_copy ( eth_hdr ( skb ) - > h_source , ( u8 * ) tmpaddr ) ;
}
net/sched: act_skbmod: Add SKBMOD_F_ECN option support
Currently, when doing rate limiting using the tc-police(8) action, the
easiest way is to simply drop the packets which exceed or conform the
configured bandwidth limit. Add a new option to tc-skbmod(8), so that
users may use the ECN [1] extension to explicitly inform the receiver
about the congestion instead of dropping packets "on the floor".
The 2 least significant bits of the Traffic Class field in IPv4 and IPv6
headers are used to represent different ECN states [2]:
0b00: "Non ECN-Capable Transport", Non-ECT
0b10: "ECN Capable Transport", ECT(0)
0b01: "ECN Capable Transport", ECT(1)
0b11: "Congestion Encountered", CE
As an example:
$ tc filter add dev eth0 parent 1: protocol ip prio 10 \
matchall action skbmod ecn
Doing the above marks all ECT(0) and ECT(1) packets as CE. It does NOT
affect Non-ECT or non-IP packets. In the tc-police scenario mentioned
above, users may pipe a tc-police action and a tc-skbmod "ecn" action
together to achieve ECN-based rate limiting.
For TCP connections, upon receiving a CE packet, the receiver will respond
with an ECE packet, asking the sender to reduce their congestion window.
However ECN also works with other L4 protocols e.g. DCCP and SCTP [2], and
our implementation does not touch or care about L4 headers.
The updated tc-skbmod SYNOPSIS looks like the following:
tc ... action skbmod { set SETTABLE | swap SWAPPABLE | ecn } ...
Only one of "set", "swap" or "ecn" shall be used in a single tc-skbmod
command. Trying to use more than one of them at a time is considered
undefined behavior; pipe multiple tc-skbmod commands together instead.
"set" and "swap" only affect Ethernet packets, while "ecn" only affects
IPv{4,6} packets.
It is also worth mentioning that, in theory, the same effect could be
achieved by piping a "police" action and a "bpf" action using the
bpf_skb_ecn_set_ce() helper, but this requires eBPF programming from the
user, thus impractical.
Depends on patch "net/sched: act_skbmod: Skip non-Ethernet packets".
[1] https://datatracker.ietf.org/doc/html/rfc3168
[2] https://en.wikipedia.org/wiki/Explicit_Congestion_Notification
Reviewed-by: Cong Wang <cong.wang@bytedance.com>
Signed-off-by: Peilin Ye <peilin.ye@bytedance.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-07-28 04:33:15 +03:00
if ( flags & SKBMOD_F_ECN )
INET_ECN_set_ce ( skb ) ;
out :
2016-09-13 03:13:09 +03:00
return action ;
2018-07-30 15:30:43 +03:00
drop :
qstats_overlimit_inc ( this_cpu_ptr ( d - > common . cpu_qstats ) ) ;
return TC_ACT_SHOT ;
2016-09-13 03:13:09 +03:00
}
/* Netlink attribute policy for TCA_SKBMOD_* attributes, enforced by
 * nla_parse_nested_deprecated() in tcf_skbmod_init().
 */
static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
	[TCA_SKBMOD_PARMS]	= { .len = sizeof(struct tc_skbmod) },
	[TCA_SKBMOD_DMAC]	= { .len = ETH_ALEN },
	[TCA_SKBMOD_SMAC]	= { .len = ETH_ALEN },
	[TCA_SKBMOD_ETYPE]	= { .type = NLA_U16 },
};
/* Control-path: create or replace a skbmod action instance from netlink
 * attributes.
 *
 * Validates the TCA_SKBMOD_* attributes, derives the internal flag word
 * (SWAPMAC and ECN override the set-flags, mirroring userspace semantics),
 * allocates a fresh parameter block and publishes it with RCU so the
 * datapath (tcf_skbmod_act) never sees a half-updated configuration.
 *
 * Returns ACT_P_CREATED for a new action, 0 on bind/replace success, or a
 * negative errno.
 */
static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_skbmod_ops.net_id);
	bool ovr = flags & TCA_ACT_FLAGS_REPLACE;
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_SKBMOD_MAX + 1];
	struct tcf_skbmod_params *p, *p_old;
	struct tcf_chain *goto_ch = NULL;
	struct tc_skbmod *parm;
	u32 lflags = 0, index;
	struct tcf_skbmod *d;
	bool exists = false;
	u8 *daddr = NULL;
	u8 *saddr = NULL;
	u16 eth_type = 0;
	int ret = 0, err;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_SKBMOD_MAX, nla,
					  skbmod_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_SKBMOD_PARMS])
		return -EINVAL;

	if (tb[TCA_SKBMOD_DMAC]) {
		daddr = nla_data(tb[TCA_SKBMOD_DMAC]);
		lflags |= SKBMOD_F_DMAC;
	}

	if (tb[TCA_SKBMOD_SMAC]) {
		saddr = nla_data(tb[TCA_SKBMOD_SMAC]);
		lflags |= SKBMOD_F_SMAC;
	}

	if (tb[TCA_SKBMOD_ETYPE]) {
		eth_type = nla_get_u16(tb[TCA_SKBMOD_ETYPE]);
		lflags |= SKBMOD_F_ETYPE;
	}

	parm = nla_data(tb[TCA_SKBMOD_PARMS]);
	index = parm->index;
	/* SWAPMAC and ECN are exclusive modes: they overwrite (not OR into)
	 * lflags, discarding any set-flags parsed above.
	 */
	if (parm->flags & SKBMOD_F_SWAPMAC)
		lflags = SKBMOD_F_SWAPMAC;
	if (parm->flags & SKBMOD_F_ECN)
		lflags = SKBMOD_F_ECN;

	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (!lflags) {
		/* Nothing to do was requested; release whatever
		 * tcf_idr_check_alloc() gave us.
		 */
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a,
				     &act_skbmod_ops, bind, true, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	d = to_skbmod(*a);

	p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
	if (unlikely(!p)) {
		err = -ENOMEM;
		goto put_chain;
	}

	p->flags = lflags;

	if (ovr)
		spin_lock_bh(&d->tcf_lock);
	/* Protected by tcf_lock if overwriting existing action. */
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	p_old = rcu_dereference_protected(d->skbmod_p, 1);

	if (lflags & SKBMOD_F_DMAC)
		ether_addr_copy(p->eth_dst, daddr);
	if (lflags & SKBMOD_F_SMAC)
		ether_addr_copy(p->eth_src, saddr);
	if (lflags & SKBMOD_F_ETYPE)
		p->eth_type = htons(eth_type);

	rcu_assign_pointer(d->skbmod_p, p);
	if (ovr)
		spin_unlock_bh(&d->tcf_lock);

	/* Old parameter block (if any) is freed after a grace period, so
	 * concurrent readers in tcf_skbmod_act() stay safe.
	 */
	if (p_old)
		kfree_rcu(p_old, rcu);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}
2017-12-05 23:53:07 +03:00
static void tcf_skbmod_cleanup ( struct tc_action * a )
2016-09-13 03:13:09 +03:00
{
struct tcf_skbmod * d = to_skbmod ( a ) ;
struct tcf_skbmod_params * p ;
p = rcu_dereference_protected ( d - > skbmod_p , 1 ) ;
2018-03-16 02:00:57 +03:00
if ( p )
kfree_rcu ( p , rcu ) ;
2016-09-13 03:13:09 +03:00
}
static int tcf_skbmod_dump ( struct sk_buff * skb , struct tc_action * a ,
int bind , int ref )
{
struct tcf_skbmod * d = to_skbmod ( a ) ;
unsigned char * b = skb_tail_pointer ( skb ) ;
2018-08-10 20:51:49 +03:00
struct tcf_skbmod_params * p ;
2016-09-13 03:13:09 +03:00
struct tc_skbmod opt = {
. index = d - > tcf_index ,
2018-07-05 17:24:24 +03:00
. refcnt = refcount_read ( & d - > tcf_refcnt ) - ref ,
. bindcnt = atomic_read ( & d - > tcf_bindcnt ) - bind ,
2016-09-13 03:13:09 +03:00
} ;
struct tcf_t t ;
2018-08-10 20:51:49 +03:00
spin_lock_bh ( & d - > tcf_lock ) ;
opt . action = d - > tcf_action ;
p = rcu_dereference_protected ( d - > skbmod_p ,
lockdep_is_held ( & d - > tcf_lock ) ) ;
2016-09-13 03:13:09 +03:00
opt . flags = p - > flags ;
if ( nla_put ( skb , TCA_SKBMOD_PARMS , sizeof ( opt ) , & opt ) )
goto nla_put_failure ;
if ( ( p - > flags & SKBMOD_F_DMAC ) & &
nla_put ( skb , TCA_SKBMOD_DMAC , ETH_ALEN , p - > eth_dst ) )
goto nla_put_failure ;
if ( ( p - > flags & SKBMOD_F_SMAC ) & &
nla_put ( skb , TCA_SKBMOD_SMAC , ETH_ALEN , p - > eth_src ) )
goto nla_put_failure ;
if ( ( p - > flags & SKBMOD_F_ETYPE ) & &
nla_put_u16 ( skb , TCA_SKBMOD_ETYPE , ntohs ( p - > eth_type ) ) )
goto nla_put_failure ;
tcf_tm_dump ( & t , & d - > tcf_tm ) ;
if ( nla_put_64bit ( skb , TCA_SKBMOD_TM , sizeof ( t ) , & t , TCA_SKBMOD_PAD ) )
goto nla_put_failure ;
2018-08-10 20:51:49 +03:00
spin_unlock_bh ( & d - > tcf_lock ) ;
2016-09-13 03:13:09 +03:00
return skb - > len ;
nla_put_failure :
2018-08-10 20:51:49 +03:00
spin_unlock_bh ( & d - > tcf_lock ) ;
2016-09-13 03:13:09 +03:00
nlmsg_trim ( skb , b ) ;
return - 1 ;
}
static struct tc_action_ops act_skbmod_ops = {
. kind = " skbmod " ,
2019-02-10 15:25:00 +03:00
. id = TCA_ACT_SKBMOD ,
2016-09-13 03:13:09 +03:00
. owner = THIS_MODULE ,
2018-08-12 16:34:59 +03:00
. act = tcf_skbmod_act ,
2016-09-13 03:13:09 +03:00
. dump = tcf_skbmod_dump ,
. init = tcf_skbmod_init ,
. cleanup = tcf_skbmod_cleanup ,
. size = sizeof ( struct tcf_skbmod ) ,
} ;
static __net_init int skbmod_init_net ( struct net * net )
{
2022-09-08 07:14:33 +03:00
struct tc_action_net * tn = net_generic ( net , act_skbmod_ops . net_id ) ;
2016-09-13 03:13:09 +03:00
2019-08-25 20:01:32 +03:00
return tc_action_net_init ( net , tn , & act_skbmod_ops ) ;
2016-09-13 03:13:09 +03:00
}
2017-12-12 02:35:03 +03:00
static void __net_exit skbmod_exit_net ( struct list_head * net_list )
2016-09-13 03:13:09 +03:00
{
2022-09-08 07:14:33 +03:00
tc_action_net_exit ( net_list , act_skbmod_ops . net_id ) ;
2016-09-13 03:13:09 +03:00
}
static struct pernet_operations skbmod_net_ops = {
. init = skbmod_init_net ,
2017-12-12 02:35:03 +03:00
. exit_batch = skbmod_exit_net ,
2022-09-08 07:14:33 +03:00
. id = & act_skbmod_ops . net_id ,
2016-09-13 03:13:09 +03:00
. size = sizeof ( struct tc_action_net ) ,
} ;
MODULE_AUTHOR("Jamal Hadi Salim, <jhs@mojatatu.com>");
MODULE_DESCRIPTION("SKB data mod-ing");
MODULE_LICENSE("GPL");

/* Module entry: register the skbmod action and its pernet ops. */
static int __init skbmod_init_module(void)
{
	return tcf_register_action(&act_skbmod_ops, &skbmod_net_ops);
}

/* Module exit: unregister everything registered above. */
static void __exit skbmod_cleanup_module(void)
{
	tcf_unregister_action(&act_skbmod_ops, &skbmod_net_ops);
}

module_init(skbmod_init_module);
module_exit(skbmod_cleanup_module);