// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_tunnel_key.h>

static unsigned int tunnel_key_net_id;
static struct tc_action_ops act_tunnel_key_ops;

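/* Datapath entry point: apply the cached tunnel parameters to the skb,
 * either dropping any existing dst (RELEASE) or attaching the pre-built
 * encapsulation metadata dst (SET).
 */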
static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	int action;

	params = rcu_dereference_bh(t->params);

	tcf_lastuse_update(&t->tcf_tm);
	tcf_action_update_bstats(&t->common, skb);
	action = READ_ONCE(t->tcf_action);

	switch (params->tcft_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		skb_dst_drop(skb);
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		skb_dst_drop(skb);
		skb_dst_set(skb, dst_clone(&params->tcft_enc_metadata->dst));
		break;
	default:
		WARN_ONCE(1, "Bad tunnel_key action %d.\n",
			  params->tcft_action);
		break;
	}

	return action;
}

static const struct nla_policy
enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC] = {
		.strict_start_type = TCA_TUNNEL_KEY_ENC_OPTS_VXLAN },
	[TCA_TUNNEL_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_OPTS_VXLAN]  = { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]  = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]  = { .type = NLA_BINARY,
						  .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]   = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR]   = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]  = { .type = NLA_U8 },
};

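/* Validate one TCA_TUNNEL_KEY_ENC_OPTS_GENEVE nest and, when @dst is
 * non-NULL, encode it as a struct geneve_opt followed by the option
 * data.  Returns the encoded length in bytes.
 */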
static int
tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1];
	int err, data_len, opt_len;
	u8 *data;

	err = nla_parse_nested_deprecated(tb,
					  TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	if (data_len < 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
		return -ERANGE;
	}
	if (data_len % 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
		return -ERANGE;
	}

	opt_len = sizeof(struct geneve_opt) + data_len;
	if (dst) {
		struct geneve_opt *opt = dst;

		WARN_ON(dst_len < opt_len);

		opt->opt_class =
			nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]);
		opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]);
		opt->length = data_len / 4; /* length is in units of 4 bytes */
		opt->r1 = 0;
		opt->r2 = 0;
		opt->r3 = 0;

		memcpy(opt + 1, data, data_len);
	}

	return opt_len;
}

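/* Validate one TCA_TUNNEL_KEY_ENC_OPTS_VXLAN nest and, when @dst is
 * non-NULL, fill a struct vxlan_metadata with the GBP value.
 */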
static int
tunnel_key_copy_vxlan_opt(const struct nlattr *nla, void *dst, int dst_len,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1];
	int err;

	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (dst) {
		struct vxlan_metadata *md = dst;

		md->gbp = nla_get_u32(tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]);
		md->gbp &= VXLAN_GBP_MASK;
	}

	return sizeof(struct vxlan_metadata);
}

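/* Validate one TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN nest: version 1 needs an
 * index, version 2 needs dir and hwid, anything else is rejected.  When
 * @dst is non-NULL the values are written into a struct erspan_metadata.
 */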
static int
tunnel_key_copy_erspan_opt(const struct nlattr *nla, void *dst, int dst_len,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1];
	int err;
	u8 ver;

	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	ver = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]);
	if (ver == 1) {
		if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
	} else if (ver == 2) {
		if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] ||
		    !tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	if (dst) {
		struct erspan_metadata *md = dst;

		md->version = ver;
		if (ver == 1) {
			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX];
			md->u.index = nla_get_be32(nla);
		} else {
			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	}

	return sizeof(struct erspan_metadata);
}

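/* Walk the TCA_TUNNEL_KEY_ENC_OPTS nest.  With @dst == NULL this only
 * validates the options and returns the total encoded length; with a
 * destination buffer it also copies them out.  Mixing option types in
 * one action is rejected.
 */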
static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
				int dst_len, struct netlink_ext_ack *extack)
{
	int err, rem, opt_len, len = nla_len(nla), opts_len = 0, type = 0;
	const struct nlattr *attr, *head = nla_data(nla);

	err = nla_validate_deprecated(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
				      enc_opts_policy, extack);
	if (err)
		return err;

	nla_for_each_attr(attr, head, len, rem) {
		switch (nla_type(attr)) {
		case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
			if (type && type != TUNNEL_GENEVE_OPT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			opt_len = tunnel_key_copy_geneve_opt(attr, dst,
							     dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			if (opts_len > IP_TUNNEL_OPTS_MAX) {
				NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
				return -EINVAL;
			}
			if (dst) {
				dst_len -= opt_len;
				dst += opt_len;
			}
			type = TUNNEL_GENEVE_OPT;
			break;
		case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
			if (type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			opt_len = tunnel_key_copy_vxlan_opt(attr, dst,
							    dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			type = TUNNEL_VXLAN_OPT;
			break;
		case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
			if (type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			opt_len = tunnel_key_copy_erspan_opt(attr, dst,
							     dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			type = TUNNEL_ERSPAN_OPT;
			break;
		}
	}

	if (!opts_len) {
		NL_SET_ERR_MSG(extack, "Empty list of tunnel options");
		return -EINVAL;
	}

	if (rem > 0) {
		NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes");
		return -EINVAL;
	}

	return opts_len;
}

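/* Dry-run pass over the options nest: returns the buffer size needed to
 * hold the encoded options without copying anything.
 */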
static int tunnel_key_get_opts_len(struct nlattr *nla,
				   struct netlink_ext_ack *extack)
{
	return tunnel_key_copy_opts(nla, NULL, 0, extack);
}

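/* Copy the validated options into the tunnel metadata and set the
 * matching TUNNEL_*_OPT flag for the chosen option type.
 */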
static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
			       int opts_len, struct netlink_ext_ack *extack)
{
	info->options_len = opts_len;
	switch (nla_type(nla_data(nla))) {
	case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
#if IS_ENABLED(CONFIG_INET)
		info->key.tun_flags |= TUNNEL_GENEVE_OPT;
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
#if IS_ENABLED(CONFIG_INET)
		info->key.tun_flags |= TUNNEL_VXLAN_OPT;
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
#if IS_ENABLED(CONFIG_INET)
		info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	default:
		NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type");
		return -EINVAL;
	}
}

static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
	[TCA_TUNNEL_KEY_PARMS]	      = { .len = sizeof(struct tc_tunnel_key) },
	[TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_KEY_ID]   = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_DST_PORT] = { .type = NLA_U16 },
	[TCA_TUNNEL_KEY_NO_CSUM]      = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPTS]     = { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_TOS]      = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_TTL]      = { .type = NLA_U8 },
};

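/* Free a parameter block after the RCU grace period, dropping the
 * encapsulation metadata dst if one was attached.
 */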
static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
{
	if (!p)
		return;
	if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
		dst_release(&p->tcft_enc_metadata->dst);

	kfree_rcu(p, rcu);
}

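/* Netlink control path: parse the action attributes, build the
 * encapsulation metadata dst for the SET case, and swap in a new
 * parameter block under tcf_lock, freeing the old one via RCU.
 */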
static int tunnel_key_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct tcf_proto *tp, u32 act_flags,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
	struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
	struct tcf_tunnel_key_params *params_new;
	struct metadata_dst *metadata = NULL;
	struct tcf_chain *goto_ch = NULL;
	struct tc_tunnel_key *parm;
	struct tcf_tunnel_key *t;
	bool exists = false;
	__be16 dst_port = 0;
	__be64 key_id = 0;
	int opts_len = 0;
	__be16 flags = 0;
	u8 tos, ttl;
	int ret = 0;
	u32 index;
	int err;

	if (!nla) {
		NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_TUNNEL_KEY_MAX, nla,
					  tunnel_key_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
		return err;
	}

	if (!tb[TCA_TUNNEL_KEY_PARMS]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
		return -EINVAL;
	}

	parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->t_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		if (tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
			__be32 key32;

			key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]);
			key_id = key32_to_tunnel_id(key32);
			flags = TUNNEL_KEY;
		}

		flags |= TUNNEL_CSUM;
		if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
		    nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
			flags &= ~TUNNEL_CSUM;

		if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
			dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);

		if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
			opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
							   extack);
			if (opts_len < 0) {
				ret = opts_len;
				goto err_out;
			}
		}

		tos = 0;
		if (tb[TCA_TUNNEL_KEY_ENC_TOS])
			tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
		ttl = 0;
		if (tb[TCA_TUNNEL_KEY_ENC_TTL])
			ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);

		if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
		    tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
			__be32 saddr;
			__be32 daddr;

			saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
			daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);

			metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
						    dst_port, flags,
						    key_id, opts_len);
		} else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
			   tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
			struct in6_addr saddr;
			struct in6_addr daddr;

			saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
			daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);

			metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
						      0, flags,
						      key_id, opts_len);
		} else {
			NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
			ret = -EINVAL;
			goto err_out;
		}

		if (!metadata) {
			NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
			ret = -ENOMEM;
			goto err_out;
		}

#ifdef CONFIG_DST_CACHE
		ret = dst_cache_init(&metadata->u.tun_info.dst_cache, GFP_KERNEL);
		if (ret)
			goto release_tun_meta;
#endif

		if (opts_len) {
			ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
						  &metadata->u.tun_info,
						  opts_len, extack);
			if (ret < 0)
				goto release_tun_meta;
		}

		metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
		break;
	default:
		NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
		ret = -EINVAL;
		goto err_out;
	}

	if (!exists) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_tunnel_key_ops, bind,
						act_flags);
		if (ret) {
			NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
			goto release_tun_meta;
		}

		ret = ACT_P_CREATED;
	} else if (!ovr) {
		NL_SET_ERR_MSG(extack, "TC IDR already exists");
		ret = -EEXIST;
		goto release_tun_meta;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0) {
		ret = err;
		exists = true;
		goto release_tun_meta;
	}
	t = to_tunnel_key(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
		ret = -ENOMEM;
		exists = true;
		goto put_chain;
	}
	params_new->tcft_action = parm->t_action;
	params_new->tcft_enc_metadata = metadata;

	spin_lock_bh(&t->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params_new = rcu_replace_pointer(t->params, params_new,
					 lockdep_is_held(&t->tcf_lock));
	spin_unlock_bh(&t->tcf_lock);
	tunnel_key_release_params(params_new);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

release_tun_meta:
	if (metadata)
		dst_release(&metadata->dst);

err_out:
	if (exists)
		tcf_idr_release(*a, bind);
	else
		tcf_idr_cleanup(tn, index);
	return ret;
}

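/* ->cleanup() callback: drop the last parameter block when the action
 * itself is destroyed.
 */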
static void tunnel_key_release(struct tc_action *a)
{
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;

	params = rcu_dereference_protected(t->params, 1);
	tunnel_key_release_params(params);
}

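/* Dump helpers: translate the binary tunnel option blob stored behind
 * the ip_tunnel_info back into the nested netlink attributes that
 * userspace expects.
 */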
static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
				       const struct ip_tunnel_info *info)
{
	int len = info->options_len;
	u8 *src = (u8 *)(info + 1);
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
	if (!start)
		return -EMSGSIZE;

	while (len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
				 opt->opt_class) ||
		    nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
			       opt->type) ||
		    nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
			    opt->length * 4, opt + 1)) {
			nla_nest_cancel(skb, start);
			return -EMSGSIZE;
		}

		len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	nla_nest_end(skb, start);
	return 0;
}

static int tunnel_key_vxlan_opts_dump(struct sk_buff *skb,
				      const struct ip_tunnel_info *info)
{
	struct vxlan_metadata *md = (struct vxlan_metadata *)(info + 1);
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN);
	if (!start)
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) {
		nla_nest_cancel(skb, start);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, start);
	return 0;
}

static int tunnel_key_erspan_opts_dump(struct sk_buff *skb,
				       const struct ip_tunnel_info *info)
{
	struct erspan_metadata *md = (struct erspan_metadata *)(info + 1);
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN);
	if (!start)
		return -EMSGSIZE;

	if (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER, md->version))
		goto err;

	if (md->version == 1 &&
	    nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
		goto err;

	if (md->version == 2 &&
	    (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR,
			md->u.md2.dir) ||
	     nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID,
			get_hwid(&md->u.md2))))
		goto err;

	nla_nest_end(skb, start);
	return 0;
err:
	nla_nest_cancel(skb, start);
	return -EMSGSIZE;
}

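/* Emit the TCA_TUNNEL_KEY_ENC_OPTS nest, dispatching on the option type
 * recorded in the tunnel key flags.
 */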
static int tunnel_key_opts_dump(struct sk_buff *skb,
				const struct ip_tunnel_info *info)
{
	struct nlattr *start;
	int err = -EINVAL;

	if (!info->options_len)
		return 0;

	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS);
	if (!start)
		return -EMSGSIZE;

	if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
		err = tunnel_key_geneve_opts_dump(skb, info);
		if (err)
			goto err_out;
	} else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
		err = tunnel_key_vxlan_opts_dump(skb, info);
		if (err)
			goto err_out;
	} else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
		err = tunnel_key_erspan_opts_dump(skb, info);
		if (err)
			goto err_out;
	} else {
err_out:
		nla_nest_cancel(skb, start);
		return err;
	}

	nla_nest_end(skb, start);
	return 0;
}

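/* Put the encapsulation source and destination addresses, choosing the
 * attribute pair that matches the tunnel's address family.
 */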
static int tunnel_key_dump_addresses(struct sk_buff *skb,
				     const struct ip_tunnel_info *info)
{
	unsigned short family = ip_tunnel_info_af(info);

	if (family == AF_INET) {
		__be32 saddr = info->key.u.ipv4.src;
		__be32 daddr = info->key.u.ipv4.dst;

		if (!nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_SRC, saddr) &&
		    !nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_DST, daddr))
			return 0;
	}

	if (family == AF_INET6) {
		const struct in6_addr *saddr6 = &info->key.u.ipv6.src;
		const struct in6_addr *daddr6 = &info->key.u.ipv6.dst;

		if (!nla_put_in6_addr(skb,
				      TCA_TUNNEL_KEY_ENC_IPV6_SRC, saddr6) &&
		    !nla_put_in6_addr(skb,
				      TCA_TUNNEL_KEY_ENC_IPV6_DST, daddr6))
			return 0;
	}

	return -EINVAL;
}

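/* ->dump() callback: serialise the current parameters to netlink under
 * tcf_lock so the dump stays consistent with concurrent replacements
 * from tunnel_key_init().
 */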
static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	struct tc_tunnel_key opt = {
		.index    = t->tcf_index,
		.refcnt   = refcount_read(&t->tcf_refcnt) - ref,
		.bindcnt  = atomic_read(&t->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;

	spin_lock_bh(&t->tcf_lock);
	params = rcu_dereference_protected(t->params,
					   lockdep_is_held(&t->tcf_lock));
	opt.action   = t->tcf_action;
	opt.t_action = params->tcft_action;

	if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
		struct ip_tunnel_info *info =
			&params->tcft_enc_metadata->u.tun_info;
		struct ip_tunnel_key *key = &info->key;
		__be32 key_id = tunnel_id_to_key32(key->tun_id);

		if (((key->tun_flags & TUNNEL_KEY) &&
		     nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) ||
		    tunnel_key_dump_addresses(skb,
					      &params->tcft_enc_metadata->u.tun_info) ||
		    (key->tp_dst &&
		     nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT,
				  key->tp_dst)) ||
		    nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
			       !(key->tun_flags & TUNNEL_CSUM)) ||
		    tunnel_key_opts_dump(skb, info))
			goto nla_put_failure;

		if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos))
			goto nla_put_failure;

		if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl))
			goto nla_put_failure;
	}

	tcf_tm_dump(&tm, &t->tcf_tm);
	if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
			  &tm, TCA_TUNNEL_KEY_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&t->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&t->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_tunnel_key_ops = {
	.kind		=	"tunnel_key",
	.id		=	TCA_ID_TUNNEL_KEY,
	.owner		=	THIS_MODULE,
	.act		=	tunnel_key_act,
	.dump		=	tunnel_key_dump,
	.init		=	tunnel_key_init,
	.cleanup	=	tunnel_key_release,
	.walk		=	tunnel_key_walker,
	.lookup		=	tunnel_key_search,
	.size		=	sizeof(struct tcf_tunnel_key),
};

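/* Per-network-namespace state: every netns gets its own tc_action_net
 * instance, looked up through tunnel_key_net_id.
 */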
static __net_init int tunnel_key_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tc_action_net_init(net, tn, &act_tunnel_key_ops);
}

static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, tunnel_key_net_id);
}

static struct pernet_operations tunnel_key_net_ops = {
	.init = tunnel_key_init_net,
	.exit_batch = tunnel_key_exit_net,
	.id   = &tunnel_key_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init tunnel_key_init_module(void)
{
	return tcf_register_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}

static void __exit tunnel_key_cleanup_module(void)
{
	tcf_unregister_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}

module_init(tunnel_key_init_module);
module_exit(tunnel_key_cleanup_module);

MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
MODULE_DESCRIPTION("ip tunnel manipulation actions");
MODULE_LICENSE("GPL v2");