#ifndef __NET_UDP_TUNNEL_H
#define __NET_UDP_TUNNEL_H

#include <net/ip_tunnels.h>
#include <net/udp.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

struct udp_port_cfg {
	u8			family;

	/* Used only for kernel-created sockets */
	union {
		struct in_addr		local_ip;
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr		local_ip6;
#endif
	};

	union {
		struct in_addr		peer_ip;
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr		peer_ip6;
#endif
	};

	__be16			local_udp_port;
	__be16			peer_udp_port;
	unsigned int		use_udp_checksums:1,
				use_udp6_tx_checksums:1,
				use_udp6_rx_checksums:1,
				ipv6_v6only:1;
};

int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp);

#if IS_ENABLED(CONFIG_IPV6)
int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp);
#else
static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
				   struct socket **sockp)
{
	return 0;
}
#endif

static inline int udp_sock_create(struct net *net,
				  struct udp_port_cfg *cfg,
				  struct socket **sockp)
{
	if (cfg->family == AF_INET)
		return udp_sock_create4(net, cfg, sockp);

	if (cfg->family == AF_INET6)
		return udp_sock_create6(net, cfg, sockp);

	return -EPFNOSUPPORT;
}

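/*
 * Example usage (a minimal sketch, not taken from in-tree code): a tunnel
 * driver creating a kernel-owned IPv4 listening socket.  The "net" and
 * "sock" variables and the port number (4789, as VXLAN uses) are
 * illustrative assumptions.
 *
 *	struct socket *sock;
 *	struct udp_port_cfg udp_conf = {
 *		.family		 = AF_INET,
 *		.local_ip.s_addr = htonl(INADDR_ANY),
 *		.local_udp_port	 = htons(4789),
 *	};
 *	int err = udp_sock_create(net, &udp_conf, &sock);
 *
 *	if (err < 0)
 *		return err;
 */
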
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk,
						     struct sk_buff **head,
						     struct sk_buff *skb);
typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
					 int nhoff);

struct udp_tunnel_sock_cfg {
	void *sk_user_data;	/* user data used by encap_rcv callback */
	/* Used for setting up udp_sock fields, see udp.h for details */
	__u8  encap_type;
	udp_tunnel_encap_rcv_t encap_rcv;
	udp_tunnel_encap_destroy_t encap_destroy;
	udp_tunnel_gro_receive_t gro_receive;
	udp_tunnel_gro_complete_t gro_complete;
};

/* Set up the given (UDP) sock to receive UDP encapsulated packets */
void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
			   struct udp_tunnel_sock_cfg *sock_cfg);

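/*
 * Example usage (a minimal sketch): handing a socket obtained from
 * udp_sock_create() over to the UDP tunnel core.  "my_encap_rcv" and
 * "my_tunnel_priv" are hypothetical driver symbols; encap_type 1 mirrors
 * what VXLAN-style tunnels use (see udp.h for the defined values).
 *
 *	struct udp_tunnel_sock_cfg tunnel_cfg = {
 *		.sk_user_data	= my_tunnel_priv,
 *		.encap_type	= 1,
 *		.encap_rcv	= my_encap_rcv,
 *		.encap_destroy	= NULL,
 *	};
 *
 *	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
 */
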
/* -- List of parsable UDP tunnel types --
 *
 * Adding to this list will result in serious debate.  The main issue is
 * that this list is essentially a list of workarounds for either poorly
 * designed tunnels, or poorly designed device offloads.
 *
 * The parsing supported via these types should really be used for Rx
 * traffic only as the network stack will have already inserted offsets for
 * the location of the headers in the skb.  In addition any ports that are
 * pushed should be kept within the namespace without leaking to other
 * devices such as VFs or other ports on the same device.
 *
 * It is strongly encouraged to use CHECKSUM_COMPLETE for Rx to avoid the
 * need to use this for Rx checksum offload.  It should not be necessary to
 * call this function to perform Tx offloads on outgoing traffic.
 */
enum udp_parsable_tunnel_type {
	UDP_TUNNEL_TYPE_VXLAN,		/* RFC 7348 */
	UDP_TUNNEL_TYPE_GENEVE,		/* draft-ietf-nvo3-geneve */
	UDP_TUNNEL_TYPE_VXLAN_GPE,	/* draft-ietf-nvo3-vxlan-gpe */
};

struct udp_tunnel_info {
	unsigned short type;
	sa_family_t sa_family;
	__be16 port;
};

/* Notify network devices of offloadable types */
void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type);
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type);
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);

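/*
 * Example usage (a minimal sketch): a VXLAN-like tunnel driver announcing
 * its freshly opened socket to offload-capable NICs, and withdrawing it
 * again before the socket is released.  "sock" is assumed to be the
 * tunnel's listening socket.
 *
 *	udp_tunnel_notify_add_rx_port(sock, UDP_TUNNEL_TYPE_VXLAN);
 *	...
 *	udp_tunnel_notify_del_rx_port(sock, UDP_TUNNEL_TYPE_VXLAN);
 */
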
static inline void udp_tunnel_get_rx_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
}

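/*
 * Example usage (a minimal sketch): a hypothetical NIC driver asking all
 * tunnels to replay their offloadable ports when the device comes up.
 * ndo_open runs under RTNL, which satisfies the ASSERT_RTNL() above;
 * "my_netdev_open" is an illustrative name.
 *
 *	static int my_netdev_open(struct net_device *dev)
 *	{
 *		...
 *		udp_tunnel_get_rx_info(dev);
 *		return 0;
 *	}
 */
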
/* Transmit the skb using UDP encapsulation. */
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
			 __be32 src, __be32 dst, __u8 tos, __u8 ttl,
			 __be16 df, __be16 src_port, __be16 dst_port,
			 bool xnet, bool nocheck);

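/*
 * Example usage (a minimal sketch): transmitting an already-encapsulated
 * skb over IPv4 once the tunnel driver has resolved the route and outer
 * header fields.  All variables shown (rt, sk, skb, fl4, addresses, ports,
 * udp_csum, xnet) are assumed to exist in the caller.
 *
 *	udp_tunnel_xmit_skb(rt, sk, skb, fl4.saddr, dst_ip, tos, ttl,
 *			    df, src_port, dst_port, xnet, !udp_csum);
 */
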
#if IS_ENABLED(CONFIG_IPV6)
int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
			 struct sk_buff *skb,
			 struct net_device *dev, struct in6_addr *saddr,
			 struct in6_addr *daddr,
			 __u8 prio, __u8 ttl, __be32 label,
			 __be16 src_port, __be16 dst_port, bool nocheck);
#endif

void udp_tunnel_sock_release(struct socket *sock);

struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
				    __be16 flags, __be64 tunnel_id,
				    int md_size);

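/*
 * Example usage (a minimal sketch): inside an encap_rcv handler of a
 * collect-metadata tunnel, attach a metadata dst carrying the tunnel key
 * to the received skb.  "vni_to_tunnel_id" is a hypothetical helper that
 * converts the on-wire VNI to a __be64 tunnel id.
 *
 *	struct metadata_dst *tun_dst;
 *
 *	tun_dst = udp_tun_rx_dst(skb, AF_INET, TUNNEL_KEY,
 *				 vni_to_tunnel_id(vni), 0);
 *	if (!tun_dst)
 *		goto drop;
 *	skb_dst_set(skb, (struct dst_entry *)tun_dst);
 */
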
#ifdef CONFIG_INET
static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
{
	int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;

	return iptunnel_handle_offloads(skb, type);
}
#endif

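/*
 * Example usage (a minimal sketch): on the transmit path, prepare the
 * skb's GSO/checksum state before pushing the outer headers and calling
 * udp_tunnel_xmit_skb().  "udp_csum" reflects whether the outer UDP
 * checksum will be filled in; error handling is abbreviated.
 *
 *	err = udp_tunnel_handle_offloads(skb, udp_csum);
 *	if (err)
 *		goto tx_error;
 */
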
static inline void udp_tunnel_encap_enable(struct socket *sock)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sock->sk->sk_family == PF_INET6)
		ipv6_stub->udpv6_encap_enable();
	else
#endif
		udp_encap_enable();
}

#endif /* __NET_UDP_TUNNEL_H */