/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * UDPv4 GSO support
 */
# include <linux/skbuff.h>
# include <net/udp.h>
# include <net/protocol.h>
/* Serializes writers (add/del) of the udp_offload list below. */
static DEFINE_SPINLOCK(udp_offload_lock);

/* Head of the RCU-protected, singly-linked list of registered UDP
 * port offloads. Readers traverse it under rcu_read_lock().
 */
static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;

/* Dereference helper for list updates performed under udp_offload_lock;
 * satisfies lockdep that the access is properly protected.
 */
#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))

/* One list node per registered offload; freed via RCU after unlink. */
struct udp_offload_priv {
	struct udp_offload	*offload;
	struct rcu_head		rcu;
	struct udp_offload_priv __rcu *next;
};
/* Segment a UDP-tunneled GSO skb: strip the outer (tunnel) headers,
 * GSO-segment the inner packet, then push the outer headers back onto
 * every resulting segment and fix up the outer UDP length and, when
 * requested, the outer UDP checksum.
 *
 * Returns the segment list, or an ERR_PTR on failure (in which case the
 * original skb is restored to its on-entry layout).
 */
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	int mac_len = skb->mac_len;
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	__be16 protocol = skb->protocol;
	netdev_features_t enc_features;
	int udp_offset, outer_hlen;
	unsigned int oldlen;
	bool need_csum;

	/* One's complement of the original total length; combined with each
	 * segment's new length below to incrementally adjust uh->check.
	 */
	oldlen = (u16)~skb->len;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* Re-point the header offsets so the inner packet looks like the
	 * outermost one to the GSO layer.
	 */
	skb->encapsulation = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = htons(ETH_P_TEB);

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	if (need_csum)
		skb->encap_hdr_csum = 1;

	/* segment inner packet. */
	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
	segs = skb_mac_gso_segment(skb, enc_features);
	if (IS_ERR_OR_NULL(segs)) {
		/* Put the outer headers back so the caller sees the skb
		 * exactly as it was handed in.
		 */
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct udphdr *uh;
		int len;

		/* Re-install the outer headers on this segment. */
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;

		skb->mac_len = mac_len;

		skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);
		uh->len = htons(len);

		if (need_csum) {
			/* Incrementally update the outer UDP checksum for
			 * the changed length field, then let
			 * gso_make_checksum() account for the payload.
			 */
			__be32 delta = htonl(oldlen + len);

			uh->check = ~csum_fold((__force __wsum)
					       ((__force u32)uh->check +
						(__force u32)delta));
			uh->check = gso_make_checksum(skb, ~uh->check);

			/* A computed zero must be sent as all-ones (RFC 768:
			 * zero means "no checksum").
			 */
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		}

		skb->protocol = protocol;
	} while ((skb = skb->next));
out:
	return segs;
}
/* GSO entry point for IPv4 UDP. Tunnel GSO types are delegated to
 * skb_udp_tunnel_segment(); plain UDP is software-UFO'd: the full UDP
 * checksum is computed here and the skb is split into IP fragments.
 */
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	__wsum csum;
	struct udphdr *uh;
	struct iphdr *iph;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features);
		goto out;
	}

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto out;

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		/* Reject any gso_type bits we don't know how to handle,
		 * and require SKB_GSO_UDP to be set.
		 */
		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
				      SKB_GSO_UDP_TUNNEL |
				      SKB_GSO_UDP_TUNNEL_CSUM |
				      SKB_GSO_IPIP |
				      SKB_GSO_GRE | SKB_GSO_GRE_CSUM |
				      SKB_GSO_MPLS) ||
			     !(type & (SKB_GSO_UDP))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		/* NULL tells the caller no segmentation is needed. */
		segs = NULL;
		goto out;
	}

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */
	uh = udp_hdr(skb);
	iph = ip_hdr(skb);

	uh->check = 0;
	csum = skb_checksum(skb, 0, skb->len, 0);
	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
	/* Zero means "no checksum"; transmit as all-ones instead. */
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment()
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}
/* Register a UDP port offload. The new entry is published at the list
 * head with rcu_assign_pointer() so concurrent readers see a fully
 * initialized node. GFP_ATOMIC: callers may not be able to sleep —
 * TODO confirm against callers.
 *
 * Returns 0 on success or -ENOMEM.
 */
int udp_add_offload(struct udp_offload *uo)
{
	struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);

	if (!new_offload)
		return -ENOMEM;

	new_offload->offload = uo;

	spin_lock(&udp_offload_lock);
	new_offload->next = udp_offload_base;
	rcu_assign_pointer(udp_offload_base, new_offload);
	spin_unlock(&udp_offload_lock);

	return 0;
}
EXPORT_SYMBOL(udp_add_offload);
/* RCU callback: release a udp_offload_priv once the grace period ends. */
static void udp_offload_free_routine(struct rcu_head *head)
{
	struct udp_offload_priv *priv;

	priv = container_of(head, struct udp_offload_priv, rcu);
	kfree(priv);
}
/* Unregister the offload matching @uo: unlink it from the list under
 * udp_offload_lock, then free it after an RCU grace period so readers
 * still traversing the list stay safe. Warns if @uo is not found.
 */
void udp_del_offload(struct udp_offload *uo)
{
	struct udp_offload_priv __rcu **head = &udp_offload_base;
	struct udp_offload_priv *uo_priv;

	spin_lock(&udp_offload_lock);

	/* Walk via the link cell (*head) so the matching node can be
	 * spliced out in place.
	 */
	uo_priv = udp_deref_protected(*head);
	for (; uo_priv != NULL;
	     uo_priv = udp_deref_protected(*head)) {
		if (uo_priv->offload == uo) {
			rcu_assign_pointer(*head,
					   udp_deref_protected(uo_priv->next));
			goto unlock;
		}
		head = &uo_priv->next;
	}
	pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
unlock:
	spin_unlock(&udp_offload_lock);
	/* uo_priv is non-NULL only when a match was unlinked above. */
	if (uo_priv != NULL)
		call_rcu(&uo_priv->rcu, udp_offload_free_routine);
}
EXPORT_SYMBOL(udp_del_offload);
/* Generic UDP GRO receive: look up a registered port offload for
 * uh->dest, mark non-matching held packets as different flows, and hand
 * the skb to the offload's gro_receive callback. Packets that already
 * passed through UDP GRO, or that lack a usable checksum, are flushed.
 */
struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
				 struct udphdr *uh)
{
	struct udp_offload_priv *uo_priv;
	struct sk_buff *p, **pp = NULL;
	struct udphdr *uh2;
	unsigned int off = skb_gro_offset(skb);
	int flush = 1;

	/* Bail out if this skb already traversed UDP GRO (nested tunnel),
	 * or if no validated / partial / offloaded checksum is available.
	 */
	if (NAPI_GRO_CB(skb)->udp_mark ||
	    (skb->ip_summed != CHECKSUM_PARTIAL &&
	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	     !NAPI_GRO_CB(skb)->csum_valid))
		goto out;

	/* mark that this skb passed once through the udp gro layer */
	NAPI_GRO_CB(skb)->udp_mark = 1;

	rcu_read_lock();
	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_receive)
			goto unflush;
	}
	goto out_unlock;

unflush:
	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr   *)(p->data + off);

		/* Match on source+dest ports (compared as one u32), and
		 * require checksums to be either both zero or both nonzero.
		 */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
		    (!uh->check ^ !uh2->check)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
	pp = uo_priv->offload->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	return pp;
}
/* IPv4 UDP GRO receive: validate the UDP checksum against the IPv4
 * pseudo-header (a zero checksum is accepted as "no checksum"), then
 * defer to the generic udp_gro_receive().
 */
static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);

	if (unlikely(!uh))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 inet_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		/* Checksum verified; convert to CHECKSUM_UNNECESSARY so later
		 * stages need not re-validate.
		 */
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					     inet_gro_compute_pseudo);
skip:
	return udp_gro_receive(head, skb, uh);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}
/* Generic UDP GRO complete: rewrite uh->len for the merged packet and
 * dispatch to the matching port offload's gro_complete callback.
 *
 * @nhoff: offset of the UDP header within skb->data.
 * Returns the callback's result, or -ENOSYS if no offload matched.
 */
int udp_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct udp_offload_priv *uo_priv;
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	uh->len = newlen;

	rcu_read_lock();

	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_complete)
			break;
	}

	if (uo_priv != NULL) {
		NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
		err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));
	}

	rcu_read_unlock();
	return err;
}
2014-09-09 19:29:12 +04:00
static int udp4_gro_complete ( struct sk_buff * skb , int nhoff )
2014-08-23 00:34:44 +04:00
{
const struct iphdr * iph = ip_hdr ( skb ) ;
struct udphdr * uh = ( struct udphdr * ) ( skb - > data + nhoff ) ;
if ( uh - > check )
uh - > check = ~ udp_v4_check ( skb - > len - nhoff , iph - > saddr ,
iph - > daddr , 0 ) ;
return udp_gro_complete ( skb , nhoff ) ;
}
/* inet protocol offload hooks for IPPROTO_UDP: GSO segmentation plus
 * GRO receive/complete.
 */
static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_segment = udp4_ufo_fragment,
		.gro_receive  =	udp4_gro_receive,
		.gro_complete =	udp4_gro_complete,
	},
};

/* Register the UDP offload handlers with the inet layer at boot. */
int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}