/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>

static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
	netdev_features_t features,
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features),
	__be16 new_protocol, bool is_ipv6)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool remcsum, need_csum, offload_csum, ufo, gso_partial;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct udphdr *uh = udp_hdr(skb);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int udp_offset, outer_hlen;
	__wsum partial;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* Adjust partial header checksum to negate old length.
	 * We cannot rely on the value contained in uh->len as it is
	 * possible that the actual value exceeds the boundaries of the
	 * 16 bit length field due to the header being added outside of an
	 * IP or IPv6 frame that was already limited to 64K - 1.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
		partial = (__force __wsum)uh->len;
	else
		partial = (__force __wsum)htonl(skb->len);
	partial = csum_sub(csum_unfold(uh->check), partial);
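
	/* "partial" now holds the outer checksum seed with the old length
	 * contribution removed; each segment below rebuilds its checksum
	 * as ~csum_fold(csum_add(partial, htonl(len))), where len is that
	 * segment's own UDP length.
	 */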

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = new_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	skb->encap_hdr_csum = need_csum;

	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
	skb->remcsum_offload = remcsum;
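
	/* With remote checksum offload the receiver derives the inner
	 * checksum from the outer UDP checksum, so each segment is marked
	 * CHECKSUM_NONE in the loop below rather than requesting inner
	 * checksum offload.
	 */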

	ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum &&
			  (skb->dev->features &
			   (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
				      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));

	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags and
	 * instead set the flag based on our outer checksum offload value.
	 */
	if (remcsum || ufo) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum || offload_csum)
			features |= NETIF_F_HW_CSUM;
	}

	/* segment inner packet. */
	segs = gso_inner_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
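	/* Walk the segment list: push the outer headers back onto each
	 * segment and fix up the outer UDP length and checksum for that
	 * segment's own payload.
	 */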
	do {
		unsigned int len;

		if (remcsum)
			skb->ip_summed = CHECKSUM_NONE;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);

		/* If we are only performing partial GSO the inner header
		 * will be using a length value equal to only one MSS sized
		 * segment instead of the entire frame.
		 */
		if (gso_partial) {
			uh->len = htons(skb_shinfo(skb)->gso_size +
					SKB_GSO_CB(skb)->data_offset +
					skb->head - (unsigned char *)uh);
		} else {
			uh->len = htons(len);
		}

		if (!need_csum)
			continue;

		uh->check = ~csum_fold(csum_add(partial,
						(__force __wsum)htonl(len)));

		if (skb->encapsulation || !offload_csum) {
			uh->check = gso_make_checksum(skb, ~uh->check);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
		}
	} while ((skb = skb->next));
out:
	return segs;
}
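
/* skb_udp_tunnel_segment - GSO-segment a UDP-encapsulated packet.
 * Picks an inner segmentation callback based on how the inner protocol
 * was recorded (Ethernet frame vs. raw IP protocol number) and hands
 * off to __skb_udp_tunnel_segment().
 */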
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6)
{
	__be16 protocol = skb->protocol;
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features);

	rcu_read_lock();

	switch (skb->inner_protocol_type) {
	case ENCAP_TYPE_ETHER:
		protocol = skb->inner_protocol;
		gso_inner_segment = skb_mac_gso_segment;
		break;
	case ENCAP_TYPE_IPPROTO:
		offloads = is_ipv6 ? inet6_offloads : inet_offloads;
		ops = rcu_dereference(offloads[skb->inner_ipproto]);
		if (!ops || !ops->callbacks.gso_segment)
			goto out_unlock;
		gso_inner_segment = ops->callbacks.gso_segment;
		break;
	default:
		goto out_unlock;
	}

	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
					protocol, is_ipv6);

out_unlock:
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);
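
/* udp4_ufo_fragment() is the IPv4 UDP .gso_segment hook: UDP tunnel
 * packets are handed to skb_udp_tunnel_segment(), everything else goes
 * through the legacy software UFO path below.
 */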
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	__wsum csum;
	struct udphdr *uh;
	struct iphdr *iph;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features, false);
		goto out;
	}

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto out;

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */
	uh = udp_hdr(skb);
	iph = ip_hdr(skb);

	uh->check = 0;
	csum = skb_checksum(skb, 0, skb->len, 0);
	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

	/* If there is no outer header we can fake a checksum offload
	 * due to the fact that we have already done the checksum in
	 * software prior to segmenting the frame.
	 */
	if (!skb->encap_hdr_csum)
		features |= NETIF_F_HW_CSUM;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment().
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}
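
/* udp_gro_receive - common UDP GRO entry point for IPv4 and IPv6.
 * A packet is only aggregated when a local socket (typically a tunnel
 * socket such as vxlan or fou) has registered a gro_receive callback
 * for the destination port; otherwise it is flushed to the normal
 * receive path.
 */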
struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
				 struct udphdr *uh, udp_lookup_t lookup)
{
	struct sk_buff *p, **pp = NULL;
	struct udphdr *uh2;
	unsigned int off = skb_gro_offset(skb);
	int flush = 1;
	struct sock *sk;

	if (NAPI_GRO_CB(skb)->encap_mark ||
	    (skb->ip_summed != CHECKSUM_PARTIAL &&
	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	     !NAPI_GRO_CB(skb)->csum_valid))
		goto out;

	/* mark that this skb passed once through the tunnel gro layer */
	NAPI_GRO_CB(skb)->encap_mark = 1;

	rcu_read_lock();
	sk = (*lookup)(skb, uh->source, uh->dest);

	if (sk && udp_sk(sk)->gro_receive)
		goto unflush;
	goto out_unlock;

unflush:
	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);

		/* Match ports (source and dest are adjacent __be16 fields,
		 * so one 32-bit compare covers both) and require that the
		 * checksums are either both zero or both nonzero.
		 */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
		    (!uh->check ^ !uh2->check)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	return pp;
}
EXPORT_SYMBOL(udp_gro_receive);

static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);

	if (unlikely(!uh))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 inet_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					     inet_gro_compute_pseudo);

skip:
	NAPI_GRO_CB(skb)->is_ipv6 = 0;
	return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}
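
/* udp_gro_complete - finish a merged UDP GRO packet: rewrite the outer
 * UDP length for the coalesced skb and let the owning socket's
 * gro_complete callback fix up the encapsulated headers.
 */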
int udp_gro_complete(struct sk_buff *skb, int nhoff,
		     udp_lookup_t lookup)
{
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;
	struct sock *sk;

	uh->len = newlen;

	/* Set encapsulation before calling into inner gro_complete()
	 * functions to make them set up the inner offsets.
	 */
	skb->encapsulation = 1;

	rcu_read_lock();
	sk = (*lookup)(skb, uh->source, uh->dest);
	if (sk && udp_sk(sk)->gro_complete)
		err = udp_sk(sk)->gro_complete(sk, skb,
					       nhoff + sizeof(struct udphdr));
	rcu_read_unlock();

	if (skb->remcsum_offload)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;

	return err;
}
EXPORT_SYMBOL(udp_gro_complete);

static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

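	/* The merged skb may later be resegmented by GSO, so record
	 * whether the tunnel used an outer UDP checksum and, if so, reset
	 * uh->check to the pseudo-header seed that segmentation expects.
	 */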
	if (uh->check) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
					  iph->daddr, 0);
	} else {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}

	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}

static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_segment = udp4_ufo_fragment,
		.gro_receive = udp4_gro_receive,
		.gro_complete = udp4_gro_complete,
	},
};
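
/* Register these GSO/GRO callbacks in the IPv4 offload table for UDP. */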
int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}