# ifndef __NET_IP_TUNNELS_H
# define __NET_IP_TUNNELS_H 1
# include <linux/if_tunnel.h>
# include <linux/netdevice.h>
# include <linux/skbuff.h>
# include <linux/types.h>
# include <linux/u64_stats_sync.h>
# include <net/dsfield.h>
# include <net/gro_cells.h>
# include <net/inet_ecn.h>
# include <net/ip.h>
# include <net/netns/generic.h>
# include <net/rtnetlink.h>
# if IS_ENABLED(CONFIG_IPV6)
# include <net/ipv6.h>
# include <net/ip6_fib.h>
# include <net/ip6_route.h>
# endif
/* Keep error state on tunnel for 30 sec */
# define IPTUNNEL_ERR_TIMEO (30*HZ)
/* 6rd prefix/relay information */
# ifdef CONFIG_IPV6_SIT_6RD
/* 6rd (IPv6 rapid deployment) configuration attached to a SIT tunnel. */
struct ip_tunnel_6rd_parm {
struct in6_addr prefix ; /* 6rd IPv6 prefix */
__be32 relay_prefix ; /* IPv4 relay prefix (network byte order) */
u16 prefixlen ; /* significant bits of @prefix */
u16 relay_prefixlen ; /* significant bits of @relay_prefix */
} ;
# endif
/* Secondary encapsulation (FOU/GUE — see encap_hlen in struct ip_tunnel)
 * wrapped around the tunnel header.
 */
struct ip_tunnel_encap {
__u16 type ; /* encapsulation type */
__u16 flags ; /* encapsulation flags */
__be16 sport ; /* UDP source port (network byte order) */
__be16 dport ; /* UDP destination port (network byte order) */
} ;
/* One entry in a tunnel's potential router list (RCU-managed singly
 * linked list, see the prl field of struct ip_tunnel).
 */
struct ip_tunnel_prl_entry {
struct ip_tunnel_prl_entry __rcu * next ; /* next entry; RCU-protected */
__be32 addr ; /* router IPv4 address (network byte order) */
u16 flags ;
struct rcu_head rcu_head ; /* for deferred free after RCU grace period */
} ;
/*
 * Cached output route for a tunnel, together with the source address
 * it was resolved for (one instance per CPU, see ip_tunnel.dst_cache).
 */
struct ip_tunnel_dst {
	struct dst_entry __rcu	*dst;	/* cached route; RCU-protected */
	__be32			 saddr;	/* source addr the route matches */
};
/*
 * Per-device state for an IPv4 tunnel; lives in the net_device private
 * area and is chained into the per-netns hash in struct ip_tunnel_net.
 */
struct ip_tunnel {
	struct ip_tunnel __rcu	*next;	/* next tunnel in hash chain; RCU */
	struct hlist_node hash_node;	/* link into ip_tunnel_net.tunnels */
	struct net_device	*dev;
	struct net		*net;	/* netns for packet i/o */

	int		err_count;	/* Number of arrived ICMP errors */
	unsigned long	err_time;	/* Time when the last ICMP error
					 * arrived */

	/* These four fields used only by GRE */
	__u32		i_seqno;	/* The last seen seqno */
	__u32		o_seqno;	/* The last output seqno */
	int		tun_hlen;	/* Precalculated header length */
	int		mlink;

	struct ip_tunnel_dst __percpu *dst_cache;	/* per-CPU route cache */

	struct ip_tunnel_parm parms;	/* netlink/ioctl-visible parameters */

	int		encap_hlen;	/* Encap header length (FOU,GUE) */
	struct ip_tunnel_encap encap;
	int		hlen;		/* tun_hlen + encap_hlen */

	/* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
	struct ip_tunnel_6rd_parm ip6rd;
#endif
	struct ip_tunnel_prl_entry __rcu *prl;	/* potential router list */
	unsigned int	prl_count;	/* # of entries in PRL */
	int		ip_tnl_net_id;	/* pernet id of owning ip_tunnel_net */
	struct gro_cells gro_cells;
};
# define TUNNEL_CSUM __cpu_to_be16(0x01)
# define TUNNEL_ROUTING __cpu_to_be16(0x02)
# define TUNNEL_KEY __cpu_to_be16(0x04)
# define TUNNEL_SEQ __cpu_to_be16(0x08)
# define TUNNEL_STRICT __cpu_to_be16(0x10)
# define TUNNEL_REC __cpu_to_be16(0x20)
# define TUNNEL_VERSION __cpu_to_be16(0x40)
# define TUNNEL_NO_KEY __cpu_to_be16(0x80)
# define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
# define TUNNEL_OAM __cpu_to_be16(0x0200)
# define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
/* Parsed tunnel-header fields of a received packet, filled in by the
 * protocol-specific receive path and consumed by ip_tunnel_rcv().
 */
struct tnl_ptk_info {
__be16 flags ; /* TUNNEL_* flag bits */
__be16 proto ; /* inner payload protocol (network byte order) */
__be32 key ; /* tunnel key/id, valid when TUNNEL_KEY is set */
__be32 seq ; /* sequence number, valid when TUNNEL_SEQ is set */
} ;
# define PACKET_RCVD 0
# define PACKET_REJECT 1
# define IP_TNL_HASH_BITS 7
# define IP_TNL_HASH_SIZE (1 << IP_TNL_HASH_BITS)
struct ip_tunnel_net {
struct net_device * fb_tunnel_dev ;
2013-08-06 09:51:37 +04:00
struct hlist_head tunnels [ IP_TNL_HASH_SIZE ] ;
2013-03-25 18:49:35 +04:00
} ;
# ifdef CONFIG_INET
int ip_tunnel_init ( struct net_device * dev ) ;
void ip_tunnel_uninit ( struct net_device * dev ) ;
void ip_tunnel_dellink ( struct net_device * dev , struct list_head * head ) ;
int ip_tunnel_init_net ( struct net * net , int ip_tnl_net_id ,
struct rtnl_link_ops * ops , char * devname ) ;
void ip_tunnel_delete_net ( struct ip_tunnel_net * itn , struct rtnl_link_ops * ops ) ;
void ip_tunnel_xmit ( struct sk_buff * skb , struct net_device * dev ,
const struct iphdr * tnl_params , const u8 protocol ) ;
int ip_tunnel_ioctl ( struct net_device * dev , struct ip_tunnel_parm * p , int cmd ) ;
int ip_tunnel_encap ( struct sk_buff * skb , struct ip_tunnel * t ,
u8 * protocol , struct flowi4 * fl4 ) ;
int ip_tunnel_change_mtu ( struct net_device * dev , int new_mtu ) ;
struct rtnl_link_stats64 * ip_tunnel_get_stats64 ( struct net_device * dev ,
struct rtnl_link_stats64 * tot ) ;
struct ip_tunnel * ip_tunnel_lookup ( struct ip_tunnel_net * itn ,
int link , __be16 flags ,
__be32 remote , __be32 local ,
__be32 key ) ;
int ip_tunnel_rcv ( struct ip_tunnel * tunnel , struct sk_buff * skb ,
const struct tnl_ptk_info * tpi , bool log_ecn_error ) ;
int ip_tunnel_changelink ( struct net_device * dev , struct nlattr * tb [ ] ,
struct ip_tunnel_parm * p ) ;
int ip_tunnel_newlink ( struct net_device * dev , struct nlattr * tb [ ] ,
struct ip_tunnel_parm * p ) ;
void ip_tunnel_setup ( struct net_device * dev , int net_id ) ;
void ip_tunnel_dst_reset_all ( struct ip_tunnel * t ) ;
int ip_tunnel_encap_setup ( struct ip_tunnel * t ,
struct ip_tunnel_encap * ipencap ) ;
/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield ( const struct iphdr * iph ,
const struct sk_buff * skb )
{
if ( skb - > protocol = = htons ( ETH_P_IP ) )
return iph - > tos ;
else if ( skb - > protocol = = htons ( ETH_P_IPV6 ) )
return ipv6_get_dsfield ( ( const struct ipv6hdr * ) iph ) ;
else
return 0 ;
}
/* Propagate ECN bits out: combine the outer TOS with the inner packet's
 * ECN bits per INET_ECN_encapsulate() to build the outer dsfield.
 */
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
				     const struct sk_buff *skb)
{
	u8 inner = ip_tunnel_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}
int iptunnel_pull_header ( struct sk_buff * skb , int hdr_len , __be16 inner_proto ) ;
int iptunnel_xmit ( struct sock * sk , struct rtable * rt , struct sk_buff * skb ,
__be32 src , __be32 dst , __u8 proto ,
__u8 tos , __u8 ttl , __be16 df , bool xnet ) ;
struct sk_buff * iptunnel_handle_offloads ( struct sk_buff * skb , bool gre_csum ,
int gso_type_mask ) ;
static inline void iptunnel_xmit_stats ( int err ,
struct net_device_stats * err_stats ,
2014-01-04 09:57:59 +04:00
struct pcpu_sw_netstats __percpu * stats )
2013-03-25 18:49:35 +04:00
{
2013-06-18 04:49:56 +04:00
if ( err > 0 ) {
2014-01-04 09:57:59 +04:00
struct pcpu_sw_netstats * tstats = this_cpu_ptr ( stats ) ;
2013-03-25 18:49:35 +04:00
u64_stats_update_begin ( & tstats - > syncp ) ;
2013-06-18 04:49:56 +04:00
tstats - > tx_bytes + = err ;
2013-03-25 18:49:35 +04:00
tstats - > tx_packets + + ;
u64_stats_update_end ( & tstats - > syncp ) ;
2013-06-18 04:49:56 +04:00
} else if ( err < 0 ) {
err_stats - > tx_errors + + ;
err_stats - > tx_aborted_errors + + ;
2013-03-25 18:49:35 +04:00
} else {
2013-06-18 04:49:56 +04:00
err_stats - > tx_dropped + + ;
2013-03-25 18:49:35 +04:00
}
}
# endif /* CONFIG_INET */
# endif /* __NET_IP_TUNNELS_H */