// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */
# include <linux/skbuff.h>
# include <linux/init.h>
# include <net/protocol.h>
# include <crypto/aead.h>
# include <crypto/authenc.h>
# include <linux/err.h>
# include <linux/module.h>
#include <net/gro.h>
# include <net/ip.h>
# include <net/xfrm.h>
# include <net/esp.h>
# include <linux/scatterlist.h>
# include <linux/kernel.h>
# include <linux/slab.h>
# include <linux/spinlock.h>
# include <net/udp.h>
/* GRO receive handler for ESP over IPv4.
 *
 * Pulls the skb up to the ESP header, parses the SPI and sequence number,
 * attaches (or reuses) a secure path with the matching xfrm state, and then
 * feeds the packet synchronously into xfrm_input().
 *
 * Returns ERR_PTR(-EINPROGRESS) when the packet was consumed by the xfrm
 * layer, or NULL (with GRO same_flow/flush reset) when GRO should leave the
 * packet to the normal receive path.
 */
static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		/* No HW-decrypted state yet: resolve the xfrm state for this
		 * SPI and record it on a freshly ensured secure path.
		 */
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		/* Refuse to nest beyond the maximum secpath depth. */
		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		/* The state reference is now owned by the secure path. */
		sp->xvec[sp->len++] = x;
		sp->olen++;

		/* secpath_set() may have re-allocated extensions; re-fetch. */
		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error.
	 */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	/* Undo the pull and tell GRO not to aggregate this packet. */
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}
2017-04-14 11:06:50 +03:00
static void esp4_gso_encap ( struct xfrm_state * x , struct sk_buff * skb )
{
struct ip_esp_hdr * esph ;
struct iphdr * iph = ip_hdr ( skb ) ;
struct xfrm_offload * xo = xfrm_offload ( skb ) ;
int proto = iph - > protocol ;
skb_push ( skb , - skb_network_offset ( skb ) ) ;
esph = ip_esp_hdr ( skb ) ;
* skb_mac_header ( skb ) = IPPROTO_ESP ;
esph - > spi = x - > id . spi ;
esph - > seq_no = htonl ( XFRM_SKB_CB ( skb ) - > seq . output . low ) ;
xo - > proto = proto ;
}
2019-03-29 23:16:27 +03:00
static struct sk_buff * xfrm4_tunnel_gso_segment ( struct xfrm_state * x ,
struct sk_buff * skb ,
netdev_features_t features )
{
2022-08-25 18:16:51 +03:00
__be16 type = x - > inner_mode . family = = AF_INET6 ? htons ( ETH_P_IPV6 )
: htons ( ETH_P_IP ) ;
return skb_eth_gso_segment ( skb , features , type ) ;
2019-03-29 23:16:27 +03:00
}
/* Segment a transport-mode ESP packet by handing it to the inner
 * protocol's registered inet offload, after skipping the ESP header.
 */
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	struct xfrm_offload *offload = xfrm_offload(skb);
	const struct net_offload *ops;

	/* Advance past the ESP header so the inner offload sees its data. */
	skb->transport_header += x->props.header_len;

	ops = rcu_dereference(inet_offloads[offload->proto]);
	if (unlikely(!ops || !ops->callbacks.gso_segment))
		return ERR_PTR(-EINVAL);

	return ops->callbacks.gso_segment(skb, features);
}
/* Segment a BEET-mode ESP packet.
 *
 * Adjusts the transport header past the BEET pseudo header (IPv4 inner) or
 * past the IPv6 extension headers (IPv6 inner), sets the gso_type bits the
 * inner protocol needs, and delegates to the inner protocol's inet offload.
 */
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			/* Variable-length BEET pseudo header precedes the
			 * payload; hdrlen is in 8-byte units.
			 */
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			/* No pseudo header present: back off the maximum
			 * pseudo-header length accounted in header_len.
			 */
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		/* Inner IPv6: skip extension headers; updates proto to the
		 * final next-header value.
		 */
		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	if (proto == IPPROTO_IPV6)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

	/* Strip everything up to the (now adjusted) transport header. */
	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}
2019-03-29 23:16:27 +03:00
static struct sk_buff * xfrm4_outer_mode_gso_segment ( struct xfrm_state * x ,
struct sk_buff * skb ,
netdev_features_t features )
{
2019-03-29 23:16:32 +03:00
switch ( x - > outer_mode . encap ) {
2019-03-29 23:16:27 +03:00
case XFRM_MODE_TUNNEL :
return xfrm4_tunnel_gso_segment ( x , skb , features ) ;
case XFRM_MODE_TRANSPORT :
return xfrm4_transport_gso_segment ( x , skb , features ) ;
2020-03-26 12:02:29 +03:00
case XFRM_MODE_BEET :
return xfrm4_beet_gso_segment ( x , skb , features ) ;
2019-03-29 23:16:27 +03:00
}
return ERR_PTR ( - EOPNOTSUPP ) ;
}
/* GSO segmentation entry point for ESP over IPv4.
 *
 * Validates that the skb carries ESP offload state matching the last state
 * on its secure path, strips the ESP header + IV, computes the feature set
 * the inner segmentation may use (depending on whether the device can do
 * ESP and ESP checksum offload for this state), and hands off to the
 * outer-mode dispatcher.
 */
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	/* The innermost (last) state on the secure path owns this packet. */
	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	/* The SPI in the packet must match the state's SPI. */
	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	/* No ESP HW offload available (or state bound to another device):
	 * inner segmentation must not rely on SG or checksum offload.
	 */
	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	/* ESP offload without TX checksum support: drop only csum features. */
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}
2017-04-14 11:06:33 +03:00
static int esp_input_tail ( struct xfrm_state * x , struct sk_buff * skb )
{
struct crypto_aead * aead = x - > data ;
2017-08-01 12:49:04 +03:00
struct xfrm_offload * xo = xfrm_offload ( skb ) ;
2017-04-14 11:06:33 +03:00
if ( ! pskb_may_pull ( skb , sizeof ( struct ip_esp_hdr ) + crypto_aead_ivsize ( aead ) ) )
return - EINVAL ;
2017-08-01 12:49:04 +03:00
if ( ! ( xo - > flags & CRYPTO_DONE ) )
skb - > ip_summed = CHECKSUM_NONE ;
2017-04-14 11:06:33 +03:00
return esp_input_done2 ( skb , 0 ) ;
}
/* Transmit-side ESP handler for the offload path.
 *
 * Decides between real HW ESP offload and the software crypto fallback,
 * sizes the ESP trailer (padding + auth tag), fills in SPI and sequence
 * numbers (advancing the 64-bit ESN counter across GSO segments), and
 * either leaves encryption to the device or performs the software tail.
 * Returns 0 on success or a negative errno.
 */
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);
	if (!xo)
		return -EINVAL;

	/* Fall back to software crypto when the device can't do ESP for
	 * this packet or the state is bound to a different device.
	 */
	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	/* Ciphertext is padded to the AEAD block size (minimum 4). */
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	/* Software crypto (or a non-GSO packet) needs the trailer built. */
	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		/* Consume one sequence number per resulting segment. */
		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	/* Low 32 bits wrapped: carry into the high (ESN) half. */
	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload) {
		/* Ensure a secure-path extension exists so the device path
		 * can find the offload state; re-fetch xo afterwards since
		 * adding an extension may reallocate.
		 */
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}
/* inet protocol offload hooks for IPPROTO_ESP (GRO receive / GSO segment). */
static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};
/* xfrm type offload for ESP: receive tail, transmit, and GSO encap hooks. */
static const struct xfrm_type_offload esp_type_offload = {
	.owner = THIS_MODULE,
	.proto = IPPROTO_ESP,
	.input_tail = esp_input_tail,
	.xmit = esp_xmit,
	.encap = esp4_gso_encap,
};
2017-02-15 11:40:00 +03:00
static int __init esp4_offload_init ( void )
{
2017-04-14 11:06:33 +03:00
if ( xfrm_register_type_offload ( & esp_type_offload , AF_INET ) < 0 ) {
pr_info ( " %s: can't add xfrm type offload \n " , __func__ ) ;
return - EAGAIN ;
}
2017-02-15 11:40:00 +03:00
return inet_add_offload ( & esp4_offload , IPPROTO_ESP ) ;
}
/* Module teardown: unregister the xfrm type offload, then remove the inet
 * protocol offload (mirrors the registration order used in init).
 */
static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}
module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
/* Module alias for the ESP xfrm offload type on AF_INET. */
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");