#ifndef _NET_IP6_TUNNEL_H
#define _NET_IP6_TUNNEL_H

#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>
#include <net/ip_tunnels.h>
#include <net/dst_cache.h>

#define IP6TUNNEL_ERR_TIMEO	(30*HZ)

/* capable of sending packets */
#define IP6_TNL_F_CAP_XMIT	0x10000
/* capable of receiving packets */
#define IP6_TNL_F_CAP_RCV	0x20000
/* determine capability on a per-packet basis */
#define IP6_TNL_F_CAP_PER_PACKET	0x40000
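
/*
 * Illustrative sketch (not kernel code): the CAP bits are computed from the
 * configured endpoint addresses via ip6_tnl_get_cap() (declared below) and
 * tested before a packet is accepted or sent. Variable names are assumed:
 *
 *	__u32 caps = ip6_tnl_get_cap(t, laddr, raddr);
 *
 *	if (!(caps & IP6_TNL_F_CAP_RCV))
 *		goto drop;	// tunnel may not receive on this address pair
 */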

struct __ip6_tnl_parm {
	char name[IFNAMSIZ];	/* name of tunnel device */
	int link;		/* ifindex of underlying L2 interface */
	__u8 proto;		/* tunnel protocol */
	__u8 encap_limit;	/* encapsulation limit for tunnel */
	__u8 hop_limit;		/* hop limit for tunnel */
	bool collect_md;
	__be32 flowinfo;	/* traffic class and flowlabel for tunnel */
	__u32 flags;		/* tunnel flags */
	struct in6_addr laddr;	/* local tunnel end-point address */
	struct in6_addr raddr;	/* remote tunnel end-point address */

	__be16			i_flags;
	__be16			o_flags;
	__be32			i_key;
	__be32			o_key;

	__u32			fwmark;
};
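
/*
 * Illustrative sketch (assumed values, not a real configuration path): a
 * creator fills in a parm block before handing it to the tunnel driver:
 *
 *	struct __ip6_tnl_parm p;
 *
 *	memset(&p, 0, sizeof(p));
 *	strscpy(p.name, "ip6tnl%d", IFNAMSIZ);
 *	p.proto = IPPROTO_IPV6;		// IPv6-in-IPv6
 *	p.hop_limit = 64;
 *	p.encap_limit = 4;		// default encapsulation limit
 *	p.laddr = local;		// assumed struct in6_addr variables
 *	p.raddr = remote;
 */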

/* IPv6 tunnel */
struct ip6_tnl {
	struct ip6_tnl __rcu *next;	/* next tunnel in list */
	struct net_device *dev;	/* virtual device associated with tunnel */
	struct net *net;	/* netns for packet i/o */
	struct __ip6_tnl_parm parms;	/* tunnel configuration parameters */
	struct flowi fl;	/* flowi template for xmit */
	struct dst_cache dst_cache;	/* cached dst */
	struct gro_cells gro_cells;

	int err_count;
	unsigned long err_time;

	/* These fields used only by GRE */
	__u32 i_seqno;	/* The last seen seqno */
	__u32 o_seqno;	/* The last output seqno */
	int hlen;	/* tun_hlen + encap_hlen */
	int tun_hlen;	/* Precalculated header length */
	int encap_hlen;	/* Encap header length (FOU, GUE) */
	struct ip_tunnel_encap encap;
	int mlink;
};

struct ip6_tnl_encap_ops {
	size_t (*encap_hlen)(struct ip_tunnel_encap *e);
	int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
			    u8 *protocol, struct flowi6 *fl6);
};

#ifdef CONFIG_INET

extern const struct ip6_tnl_encap_ops __rcu *
		ip6tun_encaps[MAX_IPTUN_ENCAP_OPS];

int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num);
int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
			  unsigned int num);
int ip6_tnl_encap_setup(struct ip6_tnl *t,
			struct ip_tunnel_encap *ipencap);
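
/*
 * Illustrative sketch: an encapsulation module (FOU/GUE-style) registers its
 * ops for a TUNNEL_ENCAP_* type; the my_* names are assumptions:
 *
 *	static const struct ip6_tnl_encap_ops my_encap_ops = {
 *		.encap_hlen	= my_encap_hlen,
 *		.build_header	= my_build_header,
 *	};
 *
 *	err = ip6_tnl_encap_add_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
 *	...
 *	ip6_tnl_encap_del_ops(&my_encap_ops, TUNNEL_ENCAP_FOU);
 */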

static inline int ip6_encap_hlen(struct ip_tunnel_encap *e)
{
	const struct ip6_tnl_encap_ops *ops;
	int hlen = -EINVAL;

	if (e->type == TUNNEL_ENCAP_NONE)
		return 0;

	if (e->type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	rcu_read_lock();
	ops = rcu_dereference(ip6tun_encaps[e->type]);
	if (likely(ops && ops->encap_hlen))
		hlen = ops->encap_hlen(e);
	rcu_read_unlock();

	return hlen;
}
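
/*
 * Illustrative sketch: a driver typically folds the encap overhead into its
 * precomputed header lengths when the tunnel is (re)configured:
 *
 *	int hlen = ip6_encap_hlen(&t->encap);	// 0 when no encap is set
 *
 *	if (hlen < 0)
 *		return hlen;	// unknown/unregistered encap type
 *	t->encap_hlen = hlen;
 *	t->hlen = t->tun_hlen + t->encap_hlen;
 */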

static inline int ip6_tnl_encap(struct sk_buff *skb, struct ip6_tnl *t,
				u8 *protocol, struct flowi6 *fl6)
{
	const struct ip6_tnl_encap_ops *ops;
	int ret = -EINVAL;

	if (t->encap.type == TUNNEL_ENCAP_NONE)
		return 0;

	if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
		return -EINVAL;

	rcu_read_lock();
	ops = rcu_dereference(ip6tun_encaps[t->encap.type]);
	if (likely(ops && ops->build_header))
		ret = ops->build_header(skb, &t->encap, protocol, fl6);
	rcu_read_unlock();

	return ret;
}
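
/*
 * Illustrative sketch: on transmit, the encap header is pushed before the
 * outer IPv6 header is built; the encap may rewrite the protocol (e.g. to
 * UDP for FOU/GUE). The inner protocol value here is an assumption:
 *
 *	u8 protocol = IPPROTO_GRE;
 *
 *	err = ip6_tnl_encap(skb, t, &protocol, fl6);
 *	if (err)
 *		goto tx_err;
 *	// ... build the outer IPv6 header using the (possibly updated) protocol
 */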

/* Tunnel encapsulation limit destination sub-option */
struct ipv6_tlv_tnl_enc_lim {
	__u8 type;		/* type-code for option */
	__u8 length;		/* option length */
	__u8 encap_limit;	/* tunnel encapsulation limit */
} __packed;
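
/*
 * Illustrative sketch: ip6_tnl_parse_tlv_enc_lim() (declared below) returns
 * the offset of this sub-option within the packet, or 0 if absent, so a
 * sender can honour an inner encapsulation limit before re-encapsulating:
 *
 *	__u16 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
 *
 *	if (offset > 0) {
 *		struct ipv6_tlv_tnl_enc_lim *tel;
 *
 *		tel = (void *)(skb_network_header(skb) + offset);
 *		if (tel->encap_limit == 0)
 *			goto drop;	// limit exhausted; must not tunnel again
 *	}
 */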

int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
		    const struct in6_addr *raddr);
int ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
		const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		bool log_ecn_error);
int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
		     const struct in6_addr *raddr);
int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu, __u8 proto);
__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
		      const struct in6_addr *raddr);
struct net *ip6_tnl_get_link_net(const struct net_device *dev);
int ip6_tnl_get_iflink(const struct net_device *dev);
int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);

static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
				  struct net_device *dev)
{
	int pkt_len, err;

	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
	pkt_len = skb->len - skb_inner_network_offset(skb);
	err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);

	if (unlikely(net_xmit_eval(err)))
		pkt_len = -1;

	iptunnel_xmit_stats(dev, pkt_len);
}
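
/*
 * Illustrative sketch: a tunnel driver marks the inner network header before
 * pushing the outer headers, then hands the skb here; only the inner payload
 * length is accounted in the device tx stats:
 *
 *	skb_reset_inner_headers(skb);	// before outer headers are pushed
 *	// ... push outer IPv6 header, route and set skb dst ...
 *	ip6tunnel_xmit(NULL, skb, dev);
 */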
#endif

#endif