/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/static_key.h>

#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
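
/* iptunnel_xmit - build and send an encapsulated IPv4 packet
 *
 * Installs the outer IPv4 header (src/dst, protocol, tos, ttl, df) on
 * @skb, scrubs state carried over from the inner packet, and hands the
 * result to ip_local_out_sk().  Returns the transmitted length, or 0 if
 * the packet was dropped.
 */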
int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		  __be32 src, __be32 dst, __u8 proto,
		  __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	int pkt_len = skb->len;
	struct iphdr *iph;
	int err;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, &rt->dst);
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	/* Push down and install the IP header. */
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->ihl	=	sizeof(struct iphdr) >> 2;
	iph->frag_off	=	df;
	iph->protocol	=	proto;
	iph->tos	=	tos;
	iph->daddr	=	dst;
	iph->saddr	=	src;
	iph->ttl	=	ttl;
	__ip_select_ident(dev_net(rt->dst.dev), iph,
			  skb_shinfo(skb)->gso_segs ?: 1);

	err = ip_local_out_sk(sk, skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	return pkt_len;
}
EXPORT_SYMBOL_GPL(iptunnel_xmit);
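
/* iptunnel_pull_header - strip the outer tunnel header on receive
 *
 * Pulls @hdr_len bytes of encapsulation header, sets skb->protocol from
 * @inner_proto (or from the inner Ethernet header for ETH_P_TEB), and
 * clears metadata (conntrack, xfrm, dst, hash, vlan, queue mapping) left
 * over from the outer packet before the inner packet re-enters the stack.
 */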
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
{
	if (unlikely(!pskb_may_pull(skb, hdr_len)))
		return -ENOMEM;

	skb_pull_rcsum(skb, hdr_len);

	if (inner_proto == htons(ETH_P_TEB)) {
		struct ethhdr *eh;

		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
			return -ENOMEM;

		eh = (struct ethhdr *)skb->data;
		if (likely(eth_proto_is_802_3(eh->h_proto)))
			skb->protocol = eh->h_proto;
		else
			skb->protocol = htons(ETH_P_802_2);
	} else {
		skb->protocol = inner_proto;
	}

	nf_reset(skb);
	secpath_reset(skb);
	skb_clear_hash_if_not_l4(skb);
	skb_dst_drop(skb);
	skb->vlan_tci = 0;
	skb_set_queue_mapping(skb, 0);
	skb->pkt_type = PACKET_HOST;
	return 0;
}
EXPORT_SYMBOL_GPL(iptunnel_pull_header);
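
/* iptunnel_handle_offloads - prepare GSO/checksum state for encapsulation
 *
 * Marks @skb as encapsulated and records the inner headers.  GSO packets
 * get @gso_type_mask added so segmentation accounts for the tunnel
 * header; non-GSO packets optionally have a pending partial checksum
 * resolved in software when @csum_help is set.  Returns the skb, or
 * frees it and returns an ERR_PTR() on failure.
 */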
struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
					 bool csum_help,
					 int gso_type_mask)
{
	int err;

	if (likely(!skb->encapsulation)) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	if (skb_is_gso(skb)) {
		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			goto error;
		skb_shinfo(skb)->gso_type |= gso_type_mask;
		return skb;
	}

	/* If packet is not gso and we are resolving any partial checksum,
	 * clear encapsulation flag. This allows setting CHECKSUM_PARTIAL
	 * on the outer header without confusing devices that implement
	 * NETIF_F_IP_CSUM with encapsulation.
	 */
	if (csum_help)
		skb->encapsulation = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
		err = skb_checksum_help(skb);
		if (unlikely(err))
			goto error;
	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_NONE;

	return skb;
error:
	kfree_skb(skb);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);

/* Often-modified stats are per-CPU; others are shared (netdev->stats). */
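/* ip_tunnel_get_stats64 - fold the per-CPU tx/rx counters into @tot.
 * Each per-CPU snapshot is taken under a u64_stats fetch/retry loop so
 * the 64-bit counters read consistently even on 32-bit hosts.
 */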
struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *tot)
{
	int i;

	netdev_stats_to_stats64(tot, &dev->stats);

	for_each_possible_cpu(i) {
		const struct pcpu_sw_netstats *tstats =
						   per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	return tot;
}
EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
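
/* Netlink policy for the LWTUNNEL_IP_* attributes that configure
 * lightweight tunnel (lwtunnel) IP encapsulation state.
 */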
static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
	[LWTUNNEL_IP_ID]	= { .type = NLA_U64 },
	[LWTUNNEL_IP_DST]	= { .type = NLA_U32 },
	[LWTUNNEL_IP_SRC]	= { .type = NLA_U32 },
	[LWTUNNEL_IP_TTL]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_TOS]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_SPORT]	= { .type = NLA_U16 },
	[LWTUNNEL_IP_DPORT]	= { .type = NLA_U16 },
	[LWTUNNEL_IP_FLAGS]	= { .type = NLA_U16 },
};
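
/* ip_tun_build_state - parse LWTUNNEL_IP_* attributes into a TX-side
 * ip_tunnel_info embedded in a freshly allocated lwtunnel_state.
 */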
static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
			      struct lwtunnel_state **ts)
{
	struct ip_tunnel_info *tun_info;
	struct lwtunnel_state *new_state;
	struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
	int err;

	err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy);
	if (err < 0)
		return err;

	new_state = lwtunnel_state_alloc(sizeof(*tun_info));
	if (!new_state)
		return -ENOMEM;

	new_state->type = LWTUNNEL_ENCAP_IP;

	tun_info = lwt_tun_info(new_state);

	if (tb[LWTUNNEL_IP_ID])
		tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP_ID]);

	if (tb[LWTUNNEL_IP_DST])
		tun_info->key.ipv4_dst = nla_get_be32(tb[LWTUNNEL_IP_DST]);

	if (tb[LWTUNNEL_IP_SRC])
		tun_info->key.ipv4_src = nla_get_be32(tb[LWTUNNEL_IP_SRC]);

	if (tb[LWTUNNEL_IP_TTL])
		tun_info->key.ipv4_ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);

	if (tb[LWTUNNEL_IP_TOS])
		tun_info->key.ipv4_tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);

	if (tb[LWTUNNEL_IP_SPORT])
		tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP_SPORT]);

	if (tb[LWTUNNEL_IP_DPORT])
		tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP_DPORT]);

	if (tb[LWTUNNEL_IP_FLAGS])
		tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]);

	tun_info->mode = IP_TUNNEL_INFO_TX;
	tun_info->options = NULL;
	tun_info->options_len = 0;

	*ts = new_state;

	return 0;
}
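
/* ip_tun_fill_encap_info - dump the tunnel key back out as
 * LWTUNNEL_IP_* netlink attributes.
 */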
static int ip_tun_fill_encap_info(struct sk_buff *skb,
				  struct lwtunnel_state *lwtstate)
{
	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

	if (nla_put_u64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) ||
	    nla_put_be32(skb, LWTUNNEL_IP_DST, tun_info->key.ipv4_dst) ||
	    nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.ipv4_src) ||
	    nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.ipv4_tos) ||
	    nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ipv4_ttl) ||
	    nla_put_u16(skb, LWTUNNEL_IP_SPORT, tun_info->key.tp_src) ||
	    nla_put_u16(skb, LWTUNNEL_IP_DPORT, tun_info->key.tp_dst) ||
	    nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
		return -ENOMEM;

	return 0;
}
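
/* ip_tun_encap_nlsize - worst-case netlink payload size needed by
 * ip_tun_fill_encap_info().
 */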
static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	return nla_total_size(8)	/* LWTUNNEL_IP_ID */
		+ nla_total_size(4)	/* LWTUNNEL_IP_DST */
		+ nla_total_size(4)	/* LWTUNNEL_IP_SRC */
		+ nla_total_size(1)	/* LWTUNNEL_IP_TOS */
		+ nla_total_size(1)	/* LWTUNNEL_IP_TTL */
		+ nla_total_size(2)	/* LWTUNNEL_IP_SPORT */
		+ nla_total_size(2)	/* LWTUNNEL_IP_DPORT */
		+ nla_total_size(2);	/* LWTUNNEL_IP_FLAGS */
}
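
/* ip_tun_cmp_encap - compare two lwtunnel states by their ip_tunnel_info;
 * returns 0 when the encapsulation parameters match.
 */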
static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	return memcmp(lwt_tun_info(a), lwt_tun_info(b),
		      sizeof(struct ip_tunnel_info));
}
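
/* lwtunnel encap ops for LWTUNNEL_ENCAP_IP, registered by
 * ip_tunnel_core_init() below.
 */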
static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
	.build_state = ip_tun_build_state,
	.fill_encap = ip_tun_fill_encap_info,
	.get_encap_size = ip_tun_encap_nlsize,
	.cmp_encap = ip_tun_cmp_encap,
};

void __init ip_tunnel_core_init(void)
{
	lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
}
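
/* Static key tracking whether any user of tunnel metadata collection is
 * active; ip_tunnel_need_metadata()/ip_tunnel_unneed_metadata() count
 * users up and down.
 */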
struct static_key ip_tunnel_metadata_cnt = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL(ip_tunnel_metadata_cnt);

void ip_tunnel_need_metadata(void)
{
	static_key_slow_inc(&ip_tunnel_metadata_cnt);
}
EXPORT_SYMBOL_GPL(ip_tunnel_need_metadata);

void ip_tunnel_unneed_metadata(void)
{
	static_key_slow_dec(&ip_tunnel_metadata_cnt);
}
EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);