2019-05-27 09:55:01 +03:00
// SPDX-License-Identifier: GPL-2.0-or-later
2010-08-22 10:05:39 +04:00
/*
* GRE over IPv4 demultiplexer driver
*
* Authors : Dmitry Kozlov ( xeb @ mail . ru )
*/
2012-03-12 11:03:32 +04:00
# define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2010-08-22 10:05:39 +04:00
# include <linux/module.h>
2013-06-18 04:49:38 +04:00
# include <linux/if.h>
# include <linux/icmp.h>
2010-08-22 10:05:39 +04:00
# include <linux/kernel.h>
# include <linux/kmod.h>
# include <linux/skbuff.h>
# include <linux/in.h>
2011-07-23 00:49:40 +04:00
# include <linux/ip.h>
2010-08-22 10:05:39 +04:00
# include <linux/netdevice.h>
2013-02-14 18:02:41 +04:00
# include <linux/if_tunnel.h>
2010-08-22 10:05:39 +04:00
# include <linux/spinlock.h>
# include <net/protocol.h>
# include <net/gre.h>
2019-01-18 14:05:39 +03:00
# include <net/erspan.h>
2010-08-22 10:05:39 +04:00
2013-06-18 04:49:38 +04:00
# include <net/icmp.h>
# include <net/route.h>
# include <net/xfrm.h>
2010-08-22 10:05:39 +04:00
2010-10-25 01:33:16 +04:00
/* Per-GRE-version table of registered protocol handlers.  Entries are
 * published/retired with cmpxchg() and read under RCU (see gre_rcv()).
 */
static const struct gre_protocol __rcu * gre_proto [ GREPROTO_MAX ] __read_mostly ;
2010-08-22 10:05:39 +04:00
/* Register a GRE protocol handler for @version.
 *
 * Returns 0 on success, -EINVAL if @version is out of range, or
 * -EBUSY if another handler already occupies the slot.
 */
int gre_add_protocol(const struct gre_protocol *proto, u8 version)
{
	const struct gre_protocol *old;

	if (version >= GREPROTO_MAX)
		return -EINVAL;

	/* Atomically claim the slot only while it is still empty. */
	old = cmpxchg((const struct gre_protocol **)&gre_proto[version],
		      NULL, proto);
	return old == NULL ? 0 : -EBUSY;
}
EXPORT_SYMBOL_GPL(gre_add_protocol);
/* Unregister a GRE protocol handler previously added for @version.
 *
 * Returns 0 on success, -EINVAL if @version is out of range, or
 * -EBUSY if the slot is not currently owned by @proto.  On success it
 * waits for an RCU grace period, so once this returns the caller may
 * safely free @proto: no reader can still be dereferencing it.
 */
int gre_del_protocol(const struct gre_protocol *proto, u8 version)
{
	const struct gre_protocol *old;

	if (version >= GREPROTO_MAX)
		return -EINVAL;

	/* Clear the slot only if we are still its registered owner. */
	old = cmpxchg((const struct gre_protocol **)&gre_proto[version],
		      proto, NULL);
	if (old != proto)
		return -EBUSY;

	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL_GPL(gre_del_protocol);
2020-03-08 09:05:14 +03:00
/* Fills in tpi and returns header length to be pulled.
 * Note that caller must use pskb_may_pull() before pulling GRE header.
 */
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		     bool *csum_err, __be16 proto, int nhs)
{
	const struct gre_base_hdr *greh;
	__be32 *options;
	int hdr_len;

	/* Make sure the fixed part of the GRE header is linear. */
	if (unlikely(!pskb_may_pull(skb, nhs + sizeof(struct gre_base_hdr))))
		return -EINVAL;

	greh = (struct gre_base_hdr *)(skb->data + nhs);
	/* Only version 0 without routing is handled here. */
	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
		return -EINVAL;

	/* Translate on-the-wire GRE flag bits into tunnel flag bitmap. */
	gre_flags_to_tnl_flags(tpi->flags, greh->flags);
	hdr_len = gre_calc_hlen(tpi->flags);

	if (!pskb_may_pull(skb, nhs + hdr_len))
		return -EINVAL;

	/* pskb_may_pull() may have reallocated the header: reload greh. */
	greh = (struct gre_base_hdr *)(skb->data + nhs);
	tpi->proto = greh->protocol;

	/* Walk the optional fields (csum, key, seq) in wire order. */
	options = (__be32 *)(greh + 1);
	if (greh->flags & GRE_CSUM) {
		if (!skb_checksum_simple_validate(skb)) {
			skb_checksum_try_convert(skb, IPPROTO_GRE,
						 null_compute_pseudo);
		} else if (csum_err) {
			*csum_err = true;
			return -EINVAL;
		}

		options++;
	}

	if (greh->flags & GRE_KEY) {
		tpi->key = *options;
		options++;
	} else {
		tpi->key = 0;
	}
	if (unlikely(greh->flags & GRE_SEQ)) {
		tpi->seq = *options;
		options++;
	} else {
		tpi->seq = 0;
	}
	/* WCCP version 1 and 2 protocol decoding.
	 * - Change protocol to IPv4/IPv6
	 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
	 */
	if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
		u8 _val, *val;

		/* Peek at the first payload byte without pulling it. */
		val = skb_header_pointer(skb, nhs + hdr_len,
					 sizeof(_val), &_val);
		if (!val)
			return -EINVAL;
		tpi->proto = proto;
		/* Not an IPv4 header (version nibble != 4): WCCPv2 adds
		 * a 4-byte redirect header before the payload.
		 */
		if ((*val & 0xF0) != 0x40)
			hdr_len += 4;
	}
	tpi->hdr_len = hdr_len;

	/* ERSPAN ver 1 and 2 protocol sets GRE key field
	 * to 0 and sets the configured key in the
	 * inner erspan header field
	 */
	if ((greh->protocol == htons(ETH_P_ERSPAN) && hdr_len != 4) ||
	    greh->protocol == htons(ETH_P_ERSPAN2)) {
		struct erspan_base_hdr *ershdr;

		if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
			return -EINVAL;

		ershdr = (struct erspan_base_hdr *)(skb->data + nhs + hdr_len);
		tpi->key = cpu_to_be32(get_session_id(ershdr));
	}

	return hdr_len;
}
EXPORT_SYMBOL(gre_parse_header);
2010-08-22 10:05:39 +04:00
static int gre_rcv ( struct sk_buff * skb )
{
const struct gre_protocol * proto ;
u8 ver ;
int ret ;
if ( ! pskb_may_pull ( skb , 12 ) )
goto drop ;
ver = skb - > data [ 1 ] & 0x7f ;
if ( ver > = GREPROTO_MAX )
goto drop ;
rcu_read_lock ( ) ;
proto = rcu_dereference ( gre_proto [ ver ] ) ;
if ( ! proto | | ! proto - > handler )
goto drop_unlock ;
ret = proto - > handler ( skb ) ;
rcu_read_unlock ( ) ;
return ret ;
drop_unlock :
rcu_read_unlock ( ) ;
drop :
kfree_skb ( skb ) ;
return NET_RX_DROP ;
}
2018-11-08 14:19:21 +03:00
/* ICMP error callback for IPPROTO_GRE: relay the error to the
 * err_handler of the protocol registered for the packet's GRE version.
 *
 * Returns 0 when a handler consumed the error, -EINVAL for a bad
 * version, -EPROTONOSUPPORT when no handler is registered.
 */
static int gre_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	/* GRE version byte sits right after the IP header. */
	u8 ver = skb->data[(iph->ihl << 2) + 1] & 0x7f;
	const struct gre_protocol *proto;
	int err;

	if (ver >= GREPROTO_MAX)
		return -EINVAL;

	rcu_read_lock();
	proto = rcu_dereference(gre_proto[ver]);
	if (proto && proto->err_handler) {
		proto->err_handler(skb, info);
		err = 0;
	} else {
		err = -EPROTONOSUPPORT;
	}
	rcu_read_unlock();

	return err;
}
/* IPv4 protocol hooks for IPPROTO_GRE: gre_rcv() handles received
 * packets, gre_err() handles related ICMP errors.
 */
static const struct net_protocol net_gre_protocol = {
. handler = gre_rcv ,
. err_handler = gre_err ,
} ;
/* Module init: register the GRE demultiplexer with the IPv4 stack.
 *
 * Returns 0 on success, -EAGAIN if the IPPROTO_GRE slot is already
 * claimed by another protocol handler.
 */
static int __init gre_init(void)
{
	/* Fixed typo: "demultiplexor" -> "demultiplexer", matching the
	 * file header comment and MODULE_DESCRIPTION below.
	 */
	pr_info("GRE over IPv4 demultiplexer driver\n");

	if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
		pr_err("can't add protocol\n");
		return -EAGAIN;
	}
	return 0;
}
/* Module exit: unhook the GRE demultiplexer from the IPv4 stack. */
static void __exit gre_exit(void)
{
	inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
}
/* Module entry/exit points and metadata. */
module_init ( gre_init ) ;
module_exit ( gre_exit ) ;
MODULE_DESCRIPTION ( " GRE over IPv4 demultiplexer driver " ) ;
2024-02-13 17:54:04 +03:00
MODULE_AUTHOR ( " D. Kozlov <xeb@mail.ru> " ) ;
2010-08-22 10:05:39 +04:00
MODULE_LICENSE ( " GPL " ) ;