#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>
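/* fou.c - foo-over-UDP (FOU) and Generic UDP Encapsulation (GUE).
 *
 * This file implements the receive path (plain FOU and GUE, including
 * GRO offload and remote checksum offload handling), the encapsulation
 * helpers used by IP tunnels on transmit, and the generic netlink
 * interface for configuring FOU listener ports.
 */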
struct fou {
	struct socket *sock;
	u8 protocol;
	u8 flags;
	__be16 port;
	u16 type;
	struct udp_offload udp_offloads;
	struct list_head list;
};
#define FOU_F_REMCSUM_NOPARTIAL BIT(0)

struct fou_cfg {
	u16 type;
	u8 protocol;
	u8 flags;
	struct udp_port_cfg udp_config;
};

static unsigned int fou_net_id;

struct fou_net {
	struct list_head fou_list;
	struct mutex fou_lock;
};
2014-09-17 23:25:56 +04:00
static inline struct fou * fou_from_sock ( struct sock * sk )
{
return sk - > sk_user_data ;
}
static void fou_recv_pull(struct sk_buff *skb, size_t len)
{
	struct iphdr *iph = ip_hdr(skb);

	/* Remove 'len' bytes from the packet (UDP header and
	 * FOU header if present).
	 */
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, udp_hdr(skb), len);
	skb_reset_transport_header(skb);
}
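/* UDP encap_rcv handler for plain FOU: strip the outer UDP header and
 * return the negative of the configured inner protocol so the UDP/IP
 * receive path resubmits the packet to that protocol handler.
 */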
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);

	if (!fou)
		return 1;

	fou_recv_pull(skb, sizeof(struct udphdr));

	return -fou->protocol;
}
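/* Handle the GUE remote checksum offload option on the non-GRO receive
 * path: the option carries the checksum start and offset relative to
 * the end of the GUE header, and skb_remcsum_process() folds the
 * checksum accordingly.
 */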
static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
				  void *data, size_t hdrlen, u8 ipproto,
				  bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);
	size_t plen = sizeof(struct udphdr) + hdrlen +
	    max_t(size_t, offset + sizeof(u16), start);

	if (skb->remcsum_offload)
		return guehdr;

	if (!pskb_may_pull(skb, plen))
		return NULL;
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	skb_remcsum_process(skb, (void *)guehdr + hdrlen,
			    start, offset, nopartial);

	return guehdr;
}
static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
	/* No support yet */
	kfree_skb(skb);
	return 0;
}
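/* UDP encap_rcv handler for GUE: validate the GUE header, process any
 * private flags (currently only remote checksum offload), strip the
 * UDP and GUE headers, and return the negative of the inner
 * proto_ctype so the packet is resubmitted to that protocol handler.
 */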
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);
	size_t len, optlen, hdrlen;
	struct guehdr *guehdr;
	void *data;
	u16 doffset = 0;

	if (!fou)
		return 1;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);
	if (!pskb_may_pull(skb, len))
		goto drop;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (!pskb_may_pull(skb, len))
		goto drop;

	/* guehdr may change after pull */
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	hdrlen = sizeof(struct guehdr) + optlen;

	if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen))
		goto drop;

	hdrlen = sizeof(struct guehdr) + optlen;

	ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);

	/* Pull csum through the guehdr now. This can be used if
	 * there is a remote checksum offload.
	 */
	skb_postpull_rcsum(skb, udp_hdr(skb), len);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_remcsum(skb, guehdr, data + doffset,
					     hdrlen, guehdr->proto_ctype,
					     !!(fou->flags &
						FOU_F_REMCSUM_NOPARTIAL));
			if (!guehdr)
				goto drop;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	if (unlikely(guehdr->control))
		return gue_control_message(skb, guehdr);

	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
	skb_reset_transport_header(skb);

	return -guehdr->proto_ctype;

drop:
	kfree_skb(skb);
	return 0;
}
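/* GRO receive for plain FOU: look up the inner protocol's net_offload
 * (IPv4 or IPv6 table depending on the outer header) and hand
 * aggregation off to its gro_receive callback.
 */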
static struct sk_buff **fou_gro_receive(struct sk_buff **head,
					struct sk_buff *skb,
					struct udp_offload *uoff)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	u8 proto = NAPI_GRO_CB(skb)->proto;
	const struct net_offload **offloads;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();

	return pp;
}
static int fou_gro_complete(struct sk_buff *skb, int nhoff,
			    struct udp_offload *uoff)
{
	const struct net_offload *ops;
	u8 proto = NAPI_GRO_CB(skb)->proto;
	int err = -ENOSYS;
	const struct net_offload **offloads;

	udp_tunnel_gro_complete(skb, nhoff);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}
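/* GRO counterpart of gue_remcsum(): only process remote checksum
 * offload once the GRO checksum has been validated, and record the
 * adjustment in the gro_remcsum state so it can be reverted by
 * skb_gro_remcsum_cleanup() if the packet is not merged.
 */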
static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
				      struct guehdr *guehdr, void *data,
				      size_t hdrlen, struct gro_remcsum *grc,
				      bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);

	if (skb->remcsum_offload)
		return guehdr;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
					 start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return guehdr;
}
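/* GRO receive for GUE: pull and validate the GUE header, apply remote
 * checksum offload if requested, match the base header and options
 * against packets already held on the GRO list, then dispatch to the
 * inner protocol's gro_receive callback.
 */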
static struct sk_buff **gue_gro_receive(struct sk_buff **head,
					struct sk_buff *skb,
					struct udp_offload *uoff)
{
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct guehdr *guehdr;
	size_t len, optlen, hdrlen, off;
	void *data;
	u16 doffset = 0;
	int flush = 1;
	struct fou *fou = container_of(uoff, struct fou, udp_offloads);
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off = skb_gro_offset(skb);
	len = off + sizeof(*guehdr);

	guehdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	if (unlikely(guehdr->control) || guehdr->version != 0 ||
	    validate_gue_flags(guehdr, optlen))
		goto out;

	hdrlen = sizeof(*guehdr) + optlen;

	/* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr,
	 * this is needed if there is a remote checksum offload.
	 */
	skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_gro_remcsum(skb, off, guehdr,
						 data + doffset, hdrlen, &grc,
						 !!(fou->flags &
						    FOU_F_REMCSUM_NOPARTIAL));

			if (!guehdr)
				goto out;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	skb_gro_pull(skb, hdrlen);

	flush = 0;

	for (p = *head; p; p = p->next) {
		const struct guehdr *guehdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		guehdr2 = (struct guehdr *)(p->data + off);

		/* Compare base GUE header to be equal (covers
		 * hlen, version, proto_ctype, and flags).
		 */
		if (guehdr->word != guehdr2->word) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Compare optional fields are the same. */
		if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
					   guehdr->hlen << 2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
	if (WARN_ON(!ops || !ops->callbacks.gro_receive))
		goto out_unlock;

	pp = ops->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, &grc);

	return pp;
}
static int gue_gro_complete(struct sk_buff *skb, int nhoff,
			    struct udp_offload *uoff)
{
	const struct net_offload **offloads;
	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	unsigned int guehlen;
	u8 proto;
	int err = -ENOENT;

	proto = guehdr->proto_ctype;

	guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

out_unlock:
	rcu_read_unlock();
	return err;
}
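/* FOU listener sockets are tracked per network namespace in
 * fou_net->fou_list, keyed by local UDP port; the list is protected by
 * fou_net->fou_lock.
 */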
static int fou_add_to_port_list(struct net *net, struct fou *fou)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fout;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (fou->port == fout->port) {
			mutex_unlock(&fn->fou_lock);
			return -EALREADY;
		}
	}

	list_add(&fou->list, &fn->fou_list);
	mutex_unlock(&fn->fou_lock);

	return 0;
}
static void fou_release(struct fou *fou)
{
	struct socket *sock = fou->sock;
	struct sock *sk = sock->sk;

	if (sk->sk_family == AF_INET)
		udp_del_offload(&fou->udp_offloads);
	list_del(&fou->list);
	udp_tunnel_sock_release(sock);

	kfree(fou);
}
static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = fou_udp_recv;
	fou->protocol = cfg->protocol;
	fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
	fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
	fou->udp_offloads.port = cfg->udp_config.local_udp_port;
	fou->udp_offloads.ipproto = cfg->protocol;

	return 0;
}

static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
{
	udp_sk(sk)->encap_rcv = gue_udp_recv;
	fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
	fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
	fou->udp_offloads.port = cfg->udp_config.local_udp_port;

	return 0;
}
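/* Create a FOU listener: open a kernel UDP socket for the configured
 * port, wire up the encap_rcv handler and GRO offloads for the
 * requested encapsulation type, and add the entry to the per-net list.
 */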
static int fou_create(struct net *net, struct fou_cfg *cfg,
		      struct socket **sockp)
{
	struct socket *sock = NULL;
	struct fou *fou = NULL;
	struct sock *sk;
	int err;

	/* Open UDP socket */
	err = udp_sock_create(net, &cfg->udp_config, &sock);
	if (err < 0)
		goto error;

	/* Allocate FOU port structure */
	fou = kzalloc(sizeof(*fou), GFP_KERNEL);
	if (!fou) {
		err = -ENOMEM;
		goto error;
	}

	sk = sock->sk;

	fou->flags = cfg->flags;
	fou->port = cfg->udp_config.local_udp_port;

	/* Initialize for the fou type */
	switch (cfg->type) {
	case FOU_ENCAP_DIRECT:
		err = fou_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	case FOU_ENCAP_GUE:
		err = gue_encap_init(sk, fou, cfg);
		if (err)
			goto error;
		break;
	default:
		err = -EINVAL;
		goto error;
	}

	fou->type = cfg->type;

	udp_sk(sk)->encap_type = 1;
	udp_encap_enable();

	sk->sk_user_data = fou;
	fou->sock = sock;

	inet_inc_convert_csum(sk);

	sk->sk_allocation = GFP_ATOMIC;

	if (cfg->udp_config.family == AF_INET) {
		err = udp_add_offload(&fou->udp_offloads);
		if (err)
			goto error;
	}

	err = fou_add_to_port_list(net, fou);
	if (err)
		goto error;

	if (sockp)
		*sockp = sock;

	return 0;

error:
	kfree(fou);
	if (sock)
		udp_tunnel_sock_release(sock);

	return err;
}
static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	__be16 port = cfg->udp_config.local_udp_port;
	int err = -EINVAL;
	struct fou *fou;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fou, &fn->fou_list, list) {
		if (fou->port == port) {
			fou_release(fou);
			err = 0;
			break;
		}
	}
	mutex_unlock(&fn->fou_lock);

	return err;
}
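/* Generic netlink interface (FOU_GENL_NAME family): FOU_CMD_ADD and
 * FOU_CMD_DEL create and destroy listener ports, FOU_CMD_GET queries a
 * single port or dumps all configured ports.
 */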
static struct genl_family fou_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= FOU_GENL_NAME,
	.version	= FOU_GENL_VERSION,
	.maxattr	= FOU_ATTR_MAX,
	.netnsok	= true,
};

static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
	[FOU_ATTR_PORT]			= { .type = NLA_U16, },
	[FOU_ATTR_AF]			= { .type = NLA_U8, },
	[FOU_ATTR_IPPROTO]		= { .type = NLA_U8, },
	[FOU_ATTR_TYPE]			= { .type = NLA_U8, },
	[FOU_ATTR_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG, },
};
static int parse_nl_config(struct genl_info *info,
			   struct fou_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	cfg->udp_config.family = AF_INET;

	if (info->attrs[FOU_ATTR_AF]) {
		u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

		if (family != AF_INET && family != AF_INET6)
			return -EINVAL;

		cfg->udp_config.family = family;
	}

	if (info->attrs[FOU_ATTR_PORT]) {
		__be16 port = nla_get_be16(info->attrs[FOU_ATTR_PORT]);

		cfg->udp_config.local_udp_port = port;
	}

	if (info->attrs[FOU_ATTR_IPPROTO])
		cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

	if (info->attrs[FOU_ATTR_TYPE])
		cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

	if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
		cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;

	return 0;
}
static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_create(net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_destroy(net, &cfg);
}
static int fou_fill_info(struct fou *fou, struct sk_buff *msg)
{
	if (nla_put_u8(msg, FOU_ATTR_AF, fou->sock->sk->sk_family) ||
	    nla_put_be16(msg, FOU_ATTR_PORT, fou->port) ||
	    nla_put_u8(msg, FOU_ATTR_IPPROTO, fou->protocol) ||
	    nla_put_u8(msg, FOU_ATTR_TYPE, fou->type))
		return -1;

	if (fou->flags & FOU_F_REMCSUM_NOPARTIAL)
		if (nla_put_flag(msg, FOU_ATTR_REMCSUM_NOPARTIAL))
			return -1;
	return 0;
}

static int fou_dump_info(struct fou *fou, u32 portid, u32 seq,
			 u32 flags, struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &fou_nl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (fou_fill_info(fou, skb) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
static int fou_nl_cmd_get_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct sk_buff *msg;
	struct fou_cfg cfg;
	struct fou *fout;
	__be16 port;
	int ret;

	ret = parse_nl_config(info, &cfg);
	if (ret)
		return ret;
	port = cfg.udp_config.local_udp_port;
	if (port == 0)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	ret = -ESRCH;
	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (port == fout->port) {
			ret = fou_dump_info(fout, info->snd_portid,
					    info->snd_seq, 0, msg,
					    info->genlhdr->cmd);
			break;
		}
	}
	mutex_unlock(&fn->fou_lock);
	if (ret < 0)
		goto out_free;

	return genlmsg_reply(msg, info);

out_free:
	nlmsg_free(msg);
	return ret;
}
static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fout;
	int idx = 0, ret;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (idx++ < cb->args[0])
			continue;
		ret = fou_dump_info(fout, NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq, NLM_F_MULTI,
				    skb, FOU_CMD_GET);
		if (ret)
			break;
	}
	mutex_unlock(&fn->fou_lock);

	cb->args[0] = idx;
	return skb->len;
}
static const struct genl_ops fou_nl_ops[] = {
	{
		.cmd = FOU_CMD_ADD,
		.doit = fou_nl_cmd_add_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_DEL,
		.doit = fou_nl_cmd_rm_port,
		.policy = fou_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_GET,
		.doit = fou_nl_cmd_get_port,
		.dumpit = fou_nl_dump,
		.policy = fou_nl_policy,
	},
};
size_t fou_encap_hlen(struct ip_tunnel_encap *e)
{
	return sizeof(struct udphdr);
}
EXPORT_SYMBOL(fou_encap_hlen);

size_t gue_encap_hlen(struct ip_tunnel_encap *e)
{
	size_t len;
	bool need_priv = false;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);

	if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
		len += GUE_PLEN_REMCSUM;
		need_priv = true;
	}

	len += need_priv ? GUE_LEN_PRIV : 0;

	return len;
}
EXPORT_SYMBOL(gue_encap_hlen);
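/* Transmit-side encapsulation helpers, used by IP tunnels through the
 * ip_tunnel_encap_ops registered below: fou_build_udp() pushes the
 * outer UDP header and switches the tunnel's outer protocol to UDP.
 */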
static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
			  struct flowi4 *fl4, u8 *protocol, __be16 sport)
{
	struct udphdr *uh;

	skb_push(skb, sizeof(struct udphdr));
	skb_reset_transport_header(skb);

	uh = udp_hdr(skb);

	uh->dest = e->dport;
	uh->source = sport;
	uh->len = htons(skb->len);
	uh->check = 0;
	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
		     fl4->saddr, fl4->daddr, skb->len);

	*protocol = IPPROTO_UDP;
}
int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
	int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	__be16 sport;

	skb = iptunnel_handle_offloads(skb, csum, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);
	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(fou_build_header);
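/* GUE transmit path: push the GUE header (with an optional private
 * flags word carrying the remote checksum offload option) ahead of the
 * inner packet, record the inner protocol in proto_ctype, then add the
 * outer UDP header.
 */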
int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		     u8 *protocol, struct flowi4 *fl4)
{
	bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
	int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	struct guehdr *guehdr;
	size_t hdrlen, optlen = 0;
	__be16 sport;
	void *data;
	bool need_priv = false;

	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		csum = false;
		optlen += GUE_PLEN_REMCSUM;
		type |= SKB_GSO_TUNNEL_REMCSUM;
		need_priv = true;
	}

	optlen += need_priv ? GUE_LEN_PRIV : 0;

	skb = iptunnel_handle_offloads(skb, csum, type);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* Get source port (based on flow hash) before skb_push */
	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
					       skb, 0, 0, false);

	hdrlen = sizeof(struct guehdr) + optlen;

	skb_push(skb, hdrlen);

	guehdr = (struct guehdr *)skb->data;

	guehdr->control = 0;
	guehdr->version = 0;
	guehdr->hlen = optlen >> 2;
	guehdr->flags = 0;
	guehdr->proto_ctype = *protocol;

	data = &guehdr[1];

	if (need_priv) {
		__be32 *flags = data;

		guehdr->flags |= GUE_FLAG_PRIV;
		*flags = 0;
		data += GUE_LEN_PRIV;

		if (type & SKB_GSO_TUNNEL_REMCSUM) {
			u16 csum_start = skb_checksum_start_offset(skb);
			__be16 *pd = data;

			if (csum_start < hdrlen)
				return -EINVAL;

			csum_start -= hdrlen;
			pd[0] = htons(csum_start);
			pd[1] = htons(csum_start + skb->csum_offset);

			if (!skb_is_gso(skb)) {
				skb->ip_summed = CHECKSUM_NONE;
				skb->encapsulation = 0;
			}

			*flags |= GUE_PFLAG_REMCSUM;
			data += GUE_PLEN_REMCSUM;
		}
	}

	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}
EXPORT_SYMBOL(gue_build_header);
#ifdef CONFIG_NET_FOU_IP_TUNNELS

static const struct ip_tunnel_encap_ops fou_iptun_ops = {
	.encap_hlen = fou_encap_hlen,
	.build_header = fou_build_header,
};

static const struct ip_tunnel_encap_ops gue_iptun_ops = {
	.encap_hlen = gue_encap_hlen,
	.build_header = gue_build_header,
};

static int ip_tunnel_encap_add_fou_ops(void)
{
	int ret;

	ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	if (ret < 0) {
		pr_err("can't add fou ops\n");
		return ret;
	}

	ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
	if (ret < 0) {
		pr_err("can't add gue ops\n");
		ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
		return ret;
	}

	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
	ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
}

#else

static int ip_tunnel_encap_add_fou_ops(void)
{
	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
}

#endif
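/* Per-namespace setup: each net gets its own fou_list and lock, and on
 * namespace teardown all remaining FOU sockets are released.
 */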
static __net_init int fou_init_net(struct net *net)
{
	struct fou_net *fn = net_generic(net, fou_net_id);

	INIT_LIST_HEAD(&fn->fou_list);
	mutex_init(&fn->fou_lock);
	return 0;
}

static __net_exit void fou_exit_net(struct net *net)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fou, *next;

	/* Close all the FOU sockets */
	mutex_lock(&fn->fou_lock);
	list_for_each_entry_safe(fou, next, &fn->fou_list, list)
		fou_release(fou);
	mutex_unlock(&fn->fou_lock);
}

static struct pernet_operations fou_net_ops = {
	.init = fou_init_net,
	.exit = fou_exit_net,
	.id   = &fou_net_id,
	.size = sizeof(struct fou_net),
};
static int __init fou_init(void)
{
	int ret;

	ret = register_pernet_device(&fou_net_ops);
	if (ret)
		goto exit;

	ret = genl_register_family_with_ops(&fou_nl_family,
					    fou_nl_ops);
	if (ret < 0)
		goto unregister;

	ret = ip_tunnel_encap_add_fou_ops();
	if (ret == 0)
		return 0;

	genl_unregister_family(&fou_nl_family);
unregister:
	unregister_pernet_device(&fou_net_ops);
exit:
	return ret;
}

static void __exit fou_fini(void)
{
	ip_tunnel_encap_del_fou_ops();
	genl_unregister_family(&fou_nl_family);
	unregister_pernet_device(&fou_net_ops);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");