/*
 * L2TP core.
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 *
 * This file contains some code of the original L2TPv2 pppol2tp
 * driver, which has the following copyright:
 *
 * Authors:	Martijn van Oosterhout <kleptog@svana.org>
 *		James Chapman (jchapman@katalix.com)
 * Contributors:
 *		Michal Ostrowski <mostrows@speakeasy.net>
 *		Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
 *		David S. Miller (davem@redhat.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/l2tp.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/file.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/protocol.h>
#include <net/inet6_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>

#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "l2tp_core.h"

#define L2TP_DRV_VERSION	"V2.0"
/* L2TP header constants */
#define L2TP_HDRFLAG_T	   0x8000
#define L2TP_HDRFLAG_L	   0x4000
#define L2TP_HDRFLAG_S	   0x0800
#define L2TP_HDRFLAG_O	   0x0200
#define L2TP_HDRFLAG_P	   0x0100

#define L2TP_HDR_VER_MASK  0x000F
#define L2TP_HDR_VER_2	   0x0002
#define L2TP_HDR_VER_3	   0x0003

/* L2TPv3 default L2-specific sublayer */
#define L2TP_SLFLAG_S	   0x40000000
#define L2TP_SL_SEQ_MASK   0x00ffffff

#define L2TP_HDR_SIZE_SEQ		10
#define L2TP_HDR_SIZE_NOSEQ		6

/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS	0

/* Private data stored for received packets in the skb.
 */
struct l2tp_skb_cb {
	u32			ns;
	u16			has_seq;
	u16			length;
	unsigned long		expires;
};

#define L2TP_SKB_CB(skb)	((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
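
/* Illustrative note (an assumption, not in the original source): the L2TP
 * per-packet scratch data is placed in skb->cb[] after the area reserved
 * for struct inet_skb_parm, so struct l2tp_skb_cb must fit in whatever
 * remains of the 48-byte cb[] array. A compile-time check could look like:
 *
 *	BUILD_BUG_ON(sizeof(struct inet_skb_parm) +
 *		     sizeof(struct l2tp_skb_cb) > sizeof(((struct sk_buff *)0)->cb));
 */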
static atomic_t l2tp_tunnel_count;
static atomic_t l2tp_session_count;

/* per-net private data for this module */
static unsigned int l2tp_net_id;
struct l2tp_net {
	struct list_head l2tp_tunnel_list;
	spinlock_t l2tp_tunnel_list_lock;
	struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
	spinlock_t l2tp_session_hlist_lock;
};

static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);

static inline struct l2tp_net *l2tp_pernet(struct net *net)
{
	BUG_ON(!net);

	return net_generic(net, l2tp_net_id);
}
/* Tunnel reference counts. Incremented per session that is added to
 * the tunnel.
 */
static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
{
	atomic_inc(&tunnel->ref_count);
}

static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
{
	if (atomic_dec_and_test(&tunnel->ref_count))
		l2tp_tunnel_free(tunnel);
}

#ifdef L2TP_REFCNT_DEBUG
#define l2tp_tunnel_inc_refcount(_t)					\
do {									\
	pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n",	\
		 __func__, __LINE__, (_t)->name,			\
		 atomic_read(&_t->ref_count));				\
	l2tp_tunnel_inc_refcount_1(_t);					\
} while (0)
#define l2tp_tunnel_dec_refcount(_t)					\
do {									\
	pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n",	\
		 __func__, __LINE__, (_t)->name,			\
		 atomic_read(&_t->ref_count));				\
	l2tp_tunnel_dec_refcount_1(_t);					\
} while (0)
#else
#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
#endif
/* Session hash global list for L2TPv3.
 * The session_id SHOULD be random according to RFC3931, but several
 * L2TP implementations use incrementing session_ids.  So we do a real
 * hash on the session_id, rather than a simple bitmask.
 */
static inline struct hlist_head *
l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
{
	return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
}

/* Lookup a session by id in the global session list
 */
static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct hlist_head *session_list =
		l2tp_session_id_hash_2(pn, session_id);
	struct l2tp_session *session;
	struct hlist_node *walk;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) {
		if (session->session_id == session_id) {
			rcu_read_unlock_bh();
			return session;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}

/* Session hash list.
 * The session_id SHOULD be random according to RFC2661, but several
 * L2TP implementations (Cisco and Microsoft) use incrementing
 * session_ids.  So we do a real hash on the session_id, rather than a
 * simple bitmask.
 */
static inline struct hlist_head *
l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
{
	return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
}
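
/* Illustrative note (not part of the driver): hash_32() mixes all bits of
 * the id, so session_ids that differ only in their upper bits still spread
 * across the whole table, whereas a plain mask such as
 *
 *	session_id & (L2TP_HASH_SIZE - 1)
 *
 * would map all of them to the same bucket.
 */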
/* Lookup a session by id
 */
struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
{
	struct hlist_head *session_list;
	struct l2tp_session *session;
	struct hlist_node *walk;

	/* In L2TPv3, session_ids are unique over all tunnels and we
	 * sometimes need to look them up before we know the
	 * tunnel.
	 */
	if (tunnel == NULL)
		return l2tp_session_find_2(net, session_id);

	session_list = l2tp_session_id_hash(tunnel, session_id);
	read_lock_bh(&tunnel->hlist_lock);
	hlist_for_each_entry(session, walk, session_list, hlist) {
		if (session->session_id == session_id) {
			read_unlock_bh(&tunnel->hlist_lock);
			return session;
		}
	}
	read_unlock_bh(&tunnel->hlist_lock);

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_find);

struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
{
	int hash;
	struct hlist_node *walk;
	struct l2tp_session *session;
	int count = 0;

	read_lock_bh(&tunnel->hlist_lock);
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
		hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) {
			if (++count > nth) {
				read_unlock_bh(&tunnel->hlist_lock);
				return session;
			}
		}
	}
	read_unlock_bh(&tunnel->hlist_lock);

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
/* Lookup a session by interface name.
 * This is very inefficient but is only used by management interfaces.
 */
struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	int hash;
	struct hlist_node *walk;
	struct l2tp_session *session;

	rcu_read_lock_bh();
	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
		hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) {
			if (!strcmp(session->ifname, ifname)) {
				rcu_read_unlock_bh();
				return session;
			}
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
/* Lookup a tunnel by id
 */
struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
{
	struct l2tp_tunnel *tunnel;
	struct l2tp_net *pn = l2tp_pernet(net);

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		if (tunnel->tunnel_id == tunnel_id) {
			rcu_read_unlock_bh();
			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_find);

struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel;
	int count = 0;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		if (++count > nth) {
			rcu_read_unlock_bh();
			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);

/*****************************************************************************
 * Receive data handling
 *****************************************************************************/
/* Queue a skb in order. We come here only if the skb has an L2TP sequence
 * number.
 */
static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct sk_buff *skbp;
	struct sk_buff *tmp;
	u32 ns = L2TP_SKB_CB(skb)->ns;
	struct l2tp_stats *sstats;

	spin_lock_bh(&session->reorder_q.lock);
	sstats = &session->stats;
	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
		if (L2TP_SKB_CB(skbp)->ns > ns) {
			__skb_queue_before(&session->reorder_q, skbp, skb);
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
				 session->name, ns, L2TP_SKB_CB(skbp)->ns,
				 skb_queue_len(&session->reorder_q));
			u64_stats_update_begin(&sstats->syncp);
			sstats->rx_oos_packets++;
			u64_stats_update_end(&sstats->syncp);
			goto out;
		}
	}

	__skb_queue_tail(&session->reorder_q, skb);

out:
	spin_unlock_bh(&session->reorder_q.lock);
}
/* Dequeue a single skb.
 */
static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int length = L2TP_SKB_CB(skb)->length;
	struct l2tp_stats *tstats, *sstats;

	/* We're about to requeue the skb, so return resources
	 * to its current owner (a socket receive buffer).
	 */
	skb_orphan(skb);

	tstats = &tunnel->stats;
	u64_stats_update_begin(&tstats->syncp);
	sstats = &session->stats;
	u64_stats_update_begin(&sstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += length;
	sstats->rx_packets++;
	sstats->rx_bytes += length;
	u64_stats_update_end(&tstats->syncp);
	u64_stats_update_end(&sstats->syncp);

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Bump our Nr */
		session->nr++;
		if (tunnel->version == L2TP_HDR_VER_2)
			session->nr &= 0xffff;
		else
			session->nr &= 0xffffff;

		l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %hu\n",
			 session->name, session->nr);
	}

	/* call private receive handler */
	if (session->recv_skb != NULL)
		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
	else
		kfree_skb(skb);

	if (session->deref)
		(*session->deref)(session);
}
/* Dequeue skbs from the session's reorder_q, subject to packet order.
 * Skbs that have been in the queue for too long are simply discarded.
 */
static void l2tp_recv_dequeue(struct l2tp_session *session)
{
	struct sk_buff *skb;
	struct sk_buff *tmp;
	struct l2tp_stats *sstats;

	/* If the pkt at the head of the queue has the nr that we
	 * expect to send up next, dequeue it and any other
	 * in-sequence packets behind it.
	 */
start:
	spin_lock_bh(&session->reorder_q.lock);
	sstats = &session->stats;
	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
		if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
			u64_stats_update_begin(&sstats->syncp);
			sstats->rx_seq_discards++;
			sstats->rx_errors++;
			u64_stats_update_end(&sstats->syncp);
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
				 session->name, L2TP_SKB_CB(skb)->ns,
				 L2TP_SKB_CB(skb)->length, session->nr,
				 skb_queue_len(&session->reorder_q));
			session->reorder_skip = 1;
			__skb_unlink(skb, &session->reorder_q);
			kfree_skb(skb);
			if (session->deref)
				(*session->deref)(session);
			continue;
		}

		if (L2TP_SKB_CB(skb)->has_seq) {
			if (session->reorder_skip) {
				l2tp_dbg(session, L2TP_MSG_SEQ,
					 "%s: advancing nr to next pkt: %u -> %u",
					 session->name, session->nr,
					 L2TP_SKB_CB(skb)->ns);
				session->reorder_skip = 0;
				session->nr = L2TP_SKB_CB(skb)->ns;
			}
			if (L2TP_SKB_CB(skb)->ns != session->nr) {
				l2tp_dbg(session, L2TP_MSG_SEQ,
					 "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n",
					 session->name, L2TP_SKB_CB(skb)->ns,
					 L2TP_SKB_CB(skb)->length, session->nr,
					 skb_queue_len(&session->reorder_q));
				goto out;
			}
		}
		__skb_unlink(skb, &session->reorder_q);

		/* Process the skb. We release the queue lock while we
		 * do so to let other contexts process the queue.
		 */
		spin_unlock_bh(&session->reorder_q.lock);
		l2tp_recv_dequeue_skb(session, skb);
		goto start;
	}

out:
	spin_unlock_bh(&session->reorder_q.lock);
}
static inline int l2tp_verify_udp_checksum(struct sock *sk,
					   struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);
	u16 ulen = ntohs(uh->len);
	__wsum psum;

	if (sk->sk_no_check || skb_csum_unnecessary(skb))
		return 0;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		if (!uh->check) {
			LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
			return 1;
		}
		if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr, ulen,
				     IPPROTO_UDP, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
		skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							 &ipv6_hdr(skb)->daddr,
							 skb->len, IPPROTO_UDP,
							 0));
	} else
#endif
	{
		struct inet_sock *inet;
		if (!uh->check)
			return 0;
		inet = inet_sk(sk);
		psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr,
					  ulen, IPPROTO_UDP, 0);

		if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !csum_fold(csum_add(psum, skb->csum)))
			return 0;
		skb->csum = psum;
	}

	return __skb_checksum_complete(skb);
}
/* Do receive processing of L2TP data frames. We handle both L2TPv2
 * and L2TPv3 data frames here.
 *
 * L2TPv2 Data Message Header
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |           Tunnel ID           |           Session ID          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |             Ns (opt)          |             Nr (opt)          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |      Offset Size (opt)        |    Offset pad... (opt)
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Data frames are marked by T=0. All other fields are the same as
 * those in L2TP control frames.
 *
 * L2TPv3 Data Message Header
 *
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      L2TP Session Header                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     L2-Specific Sublayer                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                        Tunnel Payload                      ...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 L2-Specific Sublayer Format
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |x|S|x|x|x|x|x|x|           Sequence Number                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Cookie value, sublayer format and offset (pad) are negotiated with
 * the peer when the session is set up. Unlike L2TPv2, we do not need
 * to parse the packet header to determine if optional fields are
 * present.
 *
 * Caller must already have parsed the frame and determined that it is
 * a data (not control) frame before coming here. Fields up to the
 * session-id have already been parsed and ptr points to the data
 * after the session-id.
 */
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
		      int length, int (*payload_hook)(struct sk_buff *skb))
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int offset;
	u32 ns, nr;
	struct l2tp_stats *sstats = &session->stats;

	/* The ref count is increased since we now hold a pointer to
	 * the session. Take care to decrement the refcnt when exiting
	 * this function from now on...
	 */
	l2tp_session_inc_refcount(session);
	if (session->ref)
		(*session->ref)(session);

	/* Parse and check optional cookie */
	if (session->peer_cookie_len > 0) {
		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
			l2tp_info(tunnel, L2TP_MSG_DATA,
				  "%s: cookie mismatch (%u/%u). Discarding.\n",
				  tunnel->name, tunnel->tunnel_id,
				  session->session_id);
			u64_stats_update_begin(&sstats->syncp);
			sstats->rx_cookie_discards++;
			u64_stats_update_end(&sstats->syncp);
			goto discard;
		}
		ptr += session->peer_cookie_len;
	}

	/* Handle the optional sequence numbers. Sequence numbers are
	 * in different places for L2TPv2 and L2TPv3.
	 *
	 * If we are the LAC, enable/disable sequence numbers under
	 * the control of the LNS.  If no sequence numbers present but
	 * we were expecting them, discard frame.
	 */
	ns = nr = 0;
	L2TP_SKB_CB(skb)->has_seq = 0;
	if (tunnel->version == L2TP_HDR_VER_2) {
		if (hdrflags & L2TP_HDRFLAG_S) {
			ns = ntohs(*(__be16 *) ptr);
			ptr += 2;
			nr = ntohs(*(__be16 *) ptr);
			ptr += 2;

			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = ns;
			L2TP_SKB_CB(skb)->has_seq = 1;

			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: recv data ns=%u, nr=%u, session nr=%u\n",
				 session->name, ns, nr, session->nr);
		}
	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = ntohl(*(__be32 *) ptr);

		if (l2h & 0x40000000) {
			ns = l2h & 0x00ffffff;

			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = ns;
			L2TP_SKB_CB(skb)->has_seq = 1;

			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: recv data ns=%u, session nr=%u\n",
				 session->name, ns, session->nr);
		}
	}

	/* Advance past L2-specific header, if present */
	ptr += session->l2specific_len;
	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Received a packet with sequence numbers. If we're the LNS,
		 * check if we are sending sequence numbers and if not,
		 * configure it so.
		 */
		if ((!session->lns_mode) && (!session->send_seq)) {
			l2tp_info(session, L2TP_MSG_SEQ,
				  "%s: requested to enable seq numbers by LNS\n",
				  session->name);
			session->send_seq = -1;
			l2tp_session_set_header_len(session, tunnel->version);
		}
	} else {
		/* No sequence numbers.
		 * If user has configured mandatory sequence numbers, discard.
		 */
		if (session->recv_seq) {
			l2tp_warn(session, L2TP_MSG_SEQ,
				  "%s: recv data has no seq numbers when required. Discarding.\n",
				  session->name);
			u64_stats_update_begin(&sstats->syncp);
			sstats->rx_seq_discards++;
			u64_stats_update_end(&sstats->syncp);
			goto discard;
		}

		/* If we're the LAC and we're sending sequence numbers, the
		 * LNS has requested that we no longer send sequence numbers.
		 * If we're the LNS and we're sending sequence numbers, the
		 * LAC is broken. Discard the frame.
		 */
		if ((!session->lns_mode) && (session->send_seq)) {
			l2tp_info(session, L2TP_MSG_SEQ,
				  "%s: requested to disable seq numbers by LNS\n",
				  session->name);
			session->send_seq = 0;
			l2tp_session_set_header_len(session, tunnel->version);
		} else if (session->send_seq) {
			l2tp_warn(session, L2TP_MSG_SEQ,
				  "%s: recv data has no seq numbers when required. Discarding.\n",
				  session->name);
			u64_stats_update_begin(&sstats->syncp);
			sstats->rx_seq_discards++;
			u64_stats_update_end(&sstats->syncp);
			goto discard;
		}
	}
	/* Session data offset is handled differently for L2TPv2 and
	 * L2TPv3. For L2TPv2, there is an optional 16-bit value in
	 * the header. For L2TPv3, the offset is negotiated using AVPs
	 * in the session setup control protocol.
	 */
	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If offset bit set, skip it. */
		if (hdrflags & L2TP_HDRFLAG_O) {
			offset = ntohs(*(__be16 *)ptr);
			ptr += 2 + offset;
		}
	} else
		ptr += session->offset;

	offset = ptr - optr;
	if (!pskb_may_pull(skb, offset))
		goto discard;

	__skb_pull(skb, offset);

	/* If caller wants to process the payload before we queue the
	 * packet, do so now.
	 */
	if (payload_hook)
		if ((*payload_hook)(skb))
			goto discard;

	/* Prepare skb for adding to the session's reorder_q.  Hold
	 * packets for max reorder_timeout or 1 second if not
	 * reordering.
	 */
	L2TP_SKB_CB(skb)->length = length;
	L2TP_SKB_CB(skb)->expires = jiffies +
		(session->reorder_timeout ? session->reorder_timeout : HZ);

	/* Add packet to the session's receive queue. Reordering is done here, if
	 * enabled. Saved L2TP protocol info is stored in skb->cb[].
	 */
	if (L2TP_SKB_CB(skb)->has_seq) {
		if (session->reorder_timeout != 0) {
			/* Packet reordering enabled. Add skb to session's
			 * reorder queue, in order of ns.
			 */
			l2tp_recv_queue_skb(session, skb);
		} else {
			/* Packet reordering disabled. Discard out-of-sequence
			 * packets
			 */
			if (L2TP_SKB_CB(skb)->ns != session->nr) {
				u64_stats_update_begin(&sstats->syncp);
				sstats->rx_seq_discards++;
				u64_stats_update_end(&sstats->syncp);
				l2tp_dbg(session, L2TP_MSG_SEQ,
					 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
					 session->name, L2TP_SKB_CB(skb)->ns,
					 L2TP_SKB_CB(skb)->length, session->nr,
					 skb_queue_len(&session->reorder_q));
				goto discard;
			}
			skb_queue_tail(&session->reorder_q, skb);
		}
	} else {
		/* No sequence numbers. Add the skb to the tail of the
		 * reorder queue. This ensures that it will be
		 * delivered after all previous sequenced skbs.
		 */
		skb_queue_tail(&session->reorder_q, skb);
	}

	/* Try to dequeue as many skbs from reorder_q as we can. */
	l2tp_recv_dequeue(session);

	l2tp_session_dec_refcount(session);

	return;

discard:
	u64_stats_update_begin(&sstats->syncp);
	sstats->rx_errors++;
	u64_stats_update_end(&sstats->syncp);
	kfree_skb(skb);

	if (session->deref)
		(*session->deref)(session);

	l2tp_session_dec_refcount(session);
}
EXPORT_SYMBOL(l2tp_recv_common);

/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
 * here. The skb is not on a list when we get here.
 * Returns 0 if the packet was a data packet and was successfully passed on.
 * Returns 1 if the packet was not a good data packet and could not be
 * forwarded.  All such packets are passed up to userspace to deal with.
 */
static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
			      int (*payload_hook)(struct sk_buff *skb))
{
	struct l2tp_session *session = NULL;
	unsigned char *ptr, *optr;
	u16 hdrflags;
	u32 tunnel_id, session_id;
	u16 version;
	int length;
	struct l2tp_stats *tstats;

	if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
		goto discard_bad_csum;

	/* UDP always verifies the packet length. */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Short packet? */
	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
		l2tp_info(tunnel, L2TP_MSG_DATA,
			  "%s: recv short packet (len=%d)\n",
			  tunnel->name, skb->len);
		goto error;
	}

	/* Trace packet contents, if enabled */
	if (tunnel->debug & L2TP_MSG_DATA) {
		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto error;

		pr_debug("%s: recv\n", tunnel->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
	}

	/* Point to L2TP header */
	optr = ptr = skb->data;

	/* Get L2TP header flags */
	hdrflags = ntohs(*(__be16 *) ptr);

	/* Check protocol version */
	version = hdrflags & L2TP_HDR_VER_MASK;
	if (version != tunnel->version) {
		l2tp_info(tunnel, L2TP_MSG_DATA,
			  "%s: recv protocol version mismatch: got %d expected %d\n",
			  tunnel->name, version, tunnel->version);
		goto error;
	}

	/* Get length of L2TP packet */
	length = skb->len;

	/* If type is control packet, it is handled by userspace. */
	if (hdrflags & L2TP_HDRFLAG_T) {
		l2tp_dbg(tunnel, L2TP_MSG_DATA,
			 "%s: recv control packet, len=%d\n",
			 tunnel->name, length);
		goto error;
	}

	/* Skip flags */
	ptr += 2;

	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If length is present, skip it */
		if (hdrflags & L2TP_HDRFLAG_L)
			ptr += 2;

		/* Extract tunnel and session ID */
		tunnel_id = ntohs(*(__be16 *) ptr);
		ptr += 2;
		session_id = ntohs(*(__be16 *) ptr);
		ptr += 2;
	} else {
		ptr += 2;	/* skip reserved bits */
		tunnel_id = tunnel->tunnel_id;
		session_id = ntohl(*(__be32 *) ptr);
		ptr += 4;
	}

	/* Find the session context */
	session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
	if (!session || !session->recv_skb) {
		/* Not found? Pass to userspace to deal with */
		l2tp_info(tunnel, L2TP_MSG_DATA,
			  "%s: no session found (%u/%u). Passing up.\n",
			  tunnel->name, tunnel_id, session_id);
		goto error;
	}

	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);

	return 0;

discard_bad_csum:
	LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
	UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
	tstats = &tunnel->stats;
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_errors++;
	u64_stats_update_end(&tstats->syncp);
	kfree_skb(skb);

	return 0;

error:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
}
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes:
 * 0 : success.
 * <0: error
 * >0: skb should be passed up to userspace as UDP.
 */
int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel;

	tunnel = l2tp_sock_to_tunnel(sk);
	if (tunnel == NULL)
		goto pass_up;

	l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
		 tunnel->name, skb->len);

	if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
		goto pass_up_put;

	sock_put(sk);
	return 0;

pass_up_put:
	sock_put(sk);
pass_up:
	return 1;
}
EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
/************************************************************************
 * Transmit handling
 ************************************************************************/

/* Build an L2TP header for the session into the buffer provided.
 */
static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	__be16 *bufp = buf;
	__be16 *optr = buf;
	u16 flags = L2TP_HDR_VER_2;
	u32 tunnel_id = tunnel->peer_tunnel_id;
	u32 session_id = session->peer_session_id;

	if (session->send_seq)
		flags |= L2TP_HDRFLAG_S;

	/* Setup L2TP header. */
	*bufp++ = htons(flags);
	*bufp++ = htons(tunnel_id);
	*bufp++ = htons(session_id);
	if (session->send_seq) {
		*bufp++ = htons(session->ns);
		*bufp++ = 0;
		session->ns++;
		session->ns &= 0xffff;
		l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
			 session->name, session->ns);
	}

	return bufp - optr;
}
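
/* Illustrative example (not from the original source): for a session with
 * send_seq enabled, peer_tunnel_id 5 and peer_session_id 7, the builder
 * above writes five 16-bit words into buf:
 *
 *	0x0802  0x0005  0x0007  <ns>  0x0000
 *
 * i.e. flags T=0, S=1, Ver=2, then tunnel id, session id, Ns and Nr
 * (Nr is always sent as 0 for data frames). Without send_seq only the
 * first three words are written.
 */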
static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	char *bufp = buf;
	char *optr = bufp;

	/* Setup L2TP header. The header differs slightly for UDP and
	 * IP encapsulations. For UDP, there is 4 bytes of flags.
	 */
	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		u16 flags = L2TP_HDR_VER_3;
		*((__be16 *) bufp) = htons(flags);
		bufp += 2;
		*((__be16 *) bufp) = 0;
		bufp += 2;
	}

	*((__be32 *) bufp) = htonl(session->peer_session_id);
	bufp += 4;
	if (session->cookie_len) {
		memcpy(bufp, &session->cookie[0], session->cookie_len);
		bufp += session->cookie_len;
	}
	if (session->l2specific_len) {
		if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
			u32 l2h = 0;
			if (session->send_seq) {
				l2h = 0x40000000 | session->ns;
				session->ns++;
				session->ns &= 0xffffff;
				l2tp_dbg(session, L2TP_MSG_SEQ,
					 "%s: updated ns to %u\n",
					 session->name, session->ns);
			}

			*((__be32 *) bufp) = htonl(l2h);
		}
		bufp += session->l2specific_len;
	}
	if (session->offset)
		bufp += session->offset;

	return bufp - optr;
}
static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
			  struct flowi *fl, size_t data_len)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	unsigned int len = skb->len;
	int error;
	struct l2tp_stats *tstats, *sstats;

	/* Debug */
	if (session->send_seq)
		l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes, ns=%u\n",
			 session->name, data_len, session->ns - 1);
	else
		l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes\n",
			 session->name, data_len);

	if (session->debug & L2TP_MSG_DATA) {
		int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
		unsigned char *datap = skb->data + uhlen;

		pr_debug("%s: xmit\n", session->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
				     datap, min_t(size_t, 32, len - uhlen));
	}

	/* Queue the packet to IP for output */
	skb->local_df = 1;
#if IS_ENABLED(CONFIG_IPV6)
	if (skb->sk->sk_family == PF_INET6)
		error = inet6_csk_xmit(skb, NULL);
	else
#endif
		error = ip_queue_xmit(skb, fl);

	/* Update stats */
	tstats = &tunnel->stats;
	u64_stats_update_begin(&tstats->syncp);
	sstats = &session->stats;
	u64_stats_update_begin(&sstats->syncp);
	if (error >= 0) {
		tstats->tx_packets++;
		tstats->tx_bytes += len;
		sstats->tx_packets++;
		sstats->tx_bytes += len;
	} else {
		tstats->tx_errors++;
		sstats->tx_errors++;
	}
	u64_stats_update_end(&tstats->syncp);
	u64_stats_update_end(&sstats->syncp);

	return 0;
}
/* Automatically called when the skb is freed.
 */
static void l2tp_sock_wfree(struct sk_buff *skb)
{
	sock_put(skb->sk);
}

/* For data skbs that we transmit, we associate with the tunnel socket
 * but don't do accounting.
 */
static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = l2tp_sock_wfree;
}

#if IS_ENABLED(CONFIG_IPV6)
static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
				int udp_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct udphdr *uh = udp_hdr(skb);

	if (!skb_dst(skb) || !skb_dst(skb)->dev ||
	    !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
		__wsum csum = skb_checksum(skb, 0, udp_len, 0);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		uh->check = csum_ipv6_magic(&np->saddr, &np->daddr, udp_len,
					    IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(&np->saddr, &np->daddr,
					     udp_len, IPPROTO_UDP, 0);
	}
}
#endif
/* If caller requires the skb to have a ppp header, the header must be
 * inserted in the skb data before calling this function.
 */
int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
{
	int data_len = skb->len;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct sock *sk = tunnel->sock;
	struct flowi *fl;
	struct udphdr *uh;
	struct inet_sock *inet;
	__wsum csum;
	int old_headroom;
	int new_headroom;
	int headroom;
	int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
	int udp_len;
	int ret = NET_XMIT_SUCCESS;

	/* Check that there's enough headroom in the skb to insert IP,
	 * UDP and L2TP headers. If not enough, expand it to
	 * make room. Adjust truesize.
	 */
	headroom = NET_SKB_PAD + sizeof(struct iphdr) +
		uhlen + hdr_len;
	old_headroom = skb_headroom(skb);
	if (skb_cow_head(skb, headroom)) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	new_headroom = skb_headroom(skb);
	skb_orphan(skb);
	skb->truesize += new_headroom - old_headroom;

	/* Setup L2TP header */
	session->build_header(session, __skb_push(skb, hdr_len));

	/* Reset skb netfilter state */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	nf_reset(skb);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* Get routing info from the tunnel socket */
	skb_dst_drop(skb);
	skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));

	inet = inet_sk(sk);
	fl = &inet->cork.fl;
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* Setup UDP header */
		__skb_push(skb, sizeof(*uh));
		skb_reset_transport_header(skb);
		uh = udp_hdr(skb);
		uh->source = inet->inet_sport;
		uh->dest = inet->inet_dport;
		udp_len = uhlen + hdr_len + data_len;
		uh->len = htons(udp_len);
		uh->check = 0;

		/* Calculate UDP checksum if configured to do so */
#if IS_ENABLED(CONFIG_IPV6)
		if (sk->sk_family == PF_INET6)
			l2tp_xmit_ipv6_csum(sk, skb, udp_len);
		else
#endif
		if (sk->sk_no_check == UDP_CSUM_NOXMIT)
			skb->ip_summed = CHECKSUM_NONE;
		else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
			 (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			csum = skb_checksum(skb, 0, udp_len, 0);
			uh->check = csum_tcpudp_magic(inet->inet_saddr,
						      inet->inet_daddr,
						      udp_len, IPPROTO_UDP, csum);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
			uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
						       inet->inet_daddr,
						       udp_len, IPPROTO_UDP, 0);
		}
		break;

	case L2TP_ENCAPTYPE_IP:
		break;
	}

	l2tp_skb_set_owner_w(skb, sk);

	l2tp_xmit_core(session, skb, fl, data_len);
out_unlock:
	bh_unlock_sock(sk);

	return ret;
}
EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
/*****************************************************************************
 * Tunnel and session create/destroy.
 *****************************************************************************/

/* Tunnel socket destruct hook.
 * The tunnel context is deleted only when all session sockets have been
 * closed.
 */
static void l2tp_tunnel_destruct(struct sock *sk)
{
	struct l2tp_tunnel *tunnel;

	tunnel = sk->sk_user_data;
	if (tunnel == NULL)
		goto end;

	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);

	/* Close all sessions */
	l2tp_tunnel_closeall(tunnel);

	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* No longer an encapsulation socket. See net/ipv4/udp.c */
		(udp_sk(sk))->encap_type = 0;
		(udp_sk(sk))->encap_rcv = NULL;
		break;
	case L2TP_ENCAPTYPE_IP:
		break;
	}

	/* Remove hooks into tunnel socket */
	tunnel->sock = NULL;
	sk->sk_destruct = tunnel->old_sk_destruct;
	sk->sk_user_data = NULL;

	/* Call the original destructor */
	if (sk->sk_destruct)
		(*sk->sk_destruct)(sk);

	/* We're finished with the socket */
	l2tp_tunnel_dec_refcount(tunnel);

end:
	return;
}

/* When the tunnel is closed, all the attached sessions need to go too.
 */
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
	int hash;
	struct hlist_node *walk;
	struct hlist_node *tmp;
	struct l2tp_session *session;

	BUG_ON(tunnel == NULL);

	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
		  tunnel->name);

	write_lock_bh(&tunnel->hlist_lock);
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
again:
		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
			session = hlist_entry(walk, struct l2tp_session, hlist);

			l2tp_info(session, L2TP_MSG_CONTROL,
				  "%s: closing session\n", session->name);

			hlist_del_init(&session->hlist);

			/* Since we should hold the sock lock while
			 * doing any unbinding, we need to release the
			 * lock we're holding before taking that lock.
			 * Hold a reference to the sock so it doesn't
			 * disappear as we're jumping between locks.
			 */
			if (session->ref != NULL)
				(*session->ref)(session);

			write_unlock_bh(&tunnel->hlist_lock);

			if (tunnel->version != L2TP_HDR_VER_2) {
				struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

				spin_lock_bh(&pn->l2tp_session_hlist_lock);
				hlist_del_init_rcu(&session->global_hlist);
				spin_unlock_bh(&pn->l2tp_session_hlist_lock);
				synchronize_rcu();
			}

			if (session->session_close != NULL)
				(*session->session_close)(session);

			if (session->deref != NULL)
				(*session->deref)(session);

			write_lock_bh(&tunnel->hlist_lock);

			/* Now restart from the beginning of this hash
			 * chain. We always remove a session from the
			 * list so we are guaranteed to make forward
			 * progress.
			 */
			goto again;
		}
	}
	write_unlock_bh(&tunnel->hlist_lock);
}
/* Really kill the tunnel.
 * Come here only when all sessions have been cleared from the tunnel.
 */
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
	struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

	BUG_ON(atomic_read(&tunnel->ref_count) != 0);
	BUG_ON(tunnel->sock != NULL);

	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);

	/* Remove from tunnel list */
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_del_rcu(&tunnel->list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
	synchronize_rcu();

	atomic_dec(&l2tp_tunnel_count);
	kfree(tunnel);
}
/* Create a socket for the tunnel, if one isn't set up by
 * userspace. This is used for static tunnels where there is no
 * managing L2TP daemon.
 */
static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct socket **sockp)
{
	int err = -EINVAL;
	struct sockaddr_in udp_addr;
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 udp6_addr;
	struct sockaddr_l2tpip6 ip6_addr;
#endif
	struct sockaddr_l2tpip ip_addr;
	struct socket *sock = NULL;

	switch (cfg->encap) {
	case L2TP_ENCAPTYPE_UDP:
#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			err = sock_create(AF_INET6, SOCK_DGRAM, 0, sockp);
			if (err < 0)
				goto out;

			sock = *sockp;

			memset(&udp6_addr, 0, sizeof(udp6_addr));
			udp6_addr.sin6_family = AF_INET6;
			memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
			       sizeof(udp6_addr.sin6_addr));
			udp6_addr.sin6_port = htons(cfg->local_udp_port);
			err = kernel_bind(sock, (struct sockaddr *) &udp6_addr,
					  sizeof(udp6_addr));
			if (err < 0)
				goto out;

			udp6_addr.sin6_family = AF_INET6;
			memcpy(&udp6_addr.sin6_addr, cfg->peer_ip6,
			       sizeof(udp6_addr.sin6_addr));
			udp6_addr.sin6_port = htons(cfg->peer_udp_port);
			err = kernel_connect(sock,
					     (struct sockaddr *) &udp6_addr,
					     sizeof(udp6_addr), 0);
			if (err < 0)
				goto out;
		} else
#endif
		{
			err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp);
			if (err < 0)
				goto out;

			sock = *sockp;

			memset(&udp_addr, 0, sizeof(udp_addr));
			udp_addr.sin_family = AF_INET;
			udp_addr.sin_addr = cfg->local_ip;
			udp_addr.sin_port = htons(cfg->local_udp_port);
			err = kernel_bind(sock, (struct sockaddr *) &udp_addr,
					  sizeof(udp_addr));
			if (err < 0)
				goto out;

			udp_addr.sin_family = AF_INET;
			udp_addr.sin_addr = cfg->peer_ip;
			udp_addr.sin_port = htons(cfg->peer_udp_port);
			err = kernel_connect(sock,
					     (struct sockaddr *) &udp_addr,
					     sizeof(udp_addr), 0);
			if (err < 0)
				goto out;
		}

		if (!cfg->use_udp_checksums)
			sock->sk->sk_no_check = UDP_CSUM_NOXMIT;

		break;

	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			err = sock_create(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP,
					  sockp);
			if (err < 0)
				goto out;

			sock = *sockp;

			memset(&ip6_addr, 0, sizeof(ip6_addr));
			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
					  sizeof(ip6_addr));
			if (err < 0)
				goto out;

			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock,
					     (struct sockaddr *) &ip6_addr,
					     sizeof(ip6_addr), 0);
			if (err < 0)
				goto out;
		} else
#endif
		{
			err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP,
					  sockp);
			if (err < 0)
				goto out;

			sock = *sockp;

			memset(&ip_addr, 0, sizeof(ip_addr));
			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->local_ip;
			ip_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
					  sizeof(ip_addr));
			if (err < 0)
				goto out;

			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->peer_ip;
			ip_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
					     sizeof(ip_addr), 0);
			if (err < 0)
				goto out;
		}

		break;

	default:
		goto out;
	}

out:
	if ((err < 0) && sock) {
		sock_release(sock);
		*sockp = NULL;
	}

	return err;
}
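
/* Illustrative sketch (an assumption, not part of the driver): a caller
 * setting up an unmanaged (kernel-socket) tunnel passes fd < 0 to
 * l2tp_tunnel_create() together with a cfg describing the addresses,
 * roughly:
 *
 *	struct l2tp_tunnel_cfg cfg = {
 *		.encap		   = L2TP_ENCAPTYPE_UDP,
 *		.local_ip	   = local,	// struct in_addr set up by the caller
 *		.peer_ip	   = peer,
 *		.local_udp_port	   = 1701,
 *		.peer_udp_port	   = 1701,
 *		.use_udp_checksums = 1,
 *	};
 *	err = l2tp_tunnel_create(net, -1, 3, tunnel_id, peer_tunnel_id, &cfg, &tunnel);
 *
 * which ends up in l2tp_tunnel_sock_create() above to bind and connect
 * the kernel-owned socket.
 */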
int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
{
	struct l2tp_tunnel *tunnel = NULL;
	int err;
	struct socket *sock = NULL;
	struct sock *sk = NULL;
	struct l2tp_net *pn;
	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;

	/* Get the tunnel socket from the fd, which was opened by
	 * the userspace L2TP daemon. If not specified, create a
	 * kernel socket.
	 */
	if (fd < 0) {
		err = l2tp_tunnel_sock_create(tunnel_id, peer_tunnel_id, cfg, &sock);
		if (err < 0)
			goto err;
	} else {
		err = -EBADF;
		sock = sockfd_lookup(fd, &err);
		if (!sock) {
			pr_err("tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
			       tunnel_id, fd, err);
			goto err;
		}
	}

	sk = sock->sk;

	if (cfg != NULL)
		encap = cfg->encap;

	/* Quick sanity checks */
	switch (encap) {
	case L2TP_ENCAPTYPE_UDP:
		err = -EPROTONOSUPPORT;
		if (sk->sk_protocol != IPPROTO_UDP) {
			pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
			       tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
			goto err;
		}
		break;
	case L2TP_ENCAPTYPE_IP:
		err = -EPROTONOSUPPORT;
		if (sk->sk_protocol != IPPROTO_L2TP) {
			pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
			       tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
			goto err;
		}
		break;
	}

	/* Check if this socket has already been prepped */
	tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
	if (tunnel != NULL) {
		/* This socket has already been prepped */
		err = -EBUSY;
		goto err;
	}

	tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
	if (tunnel == NULL) {
		err = -ENOMEM;
		goto err;
	}

	tunnel->version = version;
	tunnel->tunnel_id = tunnel_id;
	tunnel->peer_tunnel_id = peer_tunnel_id;
	tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;

	tunnel->magic = L2TP_TUNNEL_MAGIC;
	sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
	rwlock_init(&tunnel->hlist_lock);

	/* The net we belong to */
	tunnel->l2tp_net = net;
	pn = l2tp_pernet(net);

	if (cfg != NULL)
		tunnel->debug = cfg->debug;

	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
	tunnel->encap = encap;
	if (encap == L2TP_ENCAPTYPE_UDP) {
		/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
		udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
		udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
#if IS_ENABLED(CONFIG_IPV6)
		if (sk->sk_family == PF_INET6)
			udpv6_encap_enable();
		else
#endif
		udp_encap_enable();
	}

	sk->sk_user_data = tunnel;

	/* Hook on the tunnel socket destructor so that we can cleanup
	 * if the tunnel socket goes away.
	 */
	tunnel->old_sk_destruct = sk->sk_destruct;
	sk->sk_destruct = &l2tp_tunnel_destruct;
	tunnel->sock = sk;
	sk->sk_allocation = GFP_ATOMIC;

	/* Add tunnel to our list */
	INIT_LIST_HEAD(&tunnel->list);
	atomic_inc(&l2tp_tunnel_count);

	/* Bump the reference count. The tunnel context is deleted
	 * only when this drops to zero. Must be done before list insertion
	 */
	l2tp_tunnel_inc_refcount(tunnel);
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

	err = 0;
err:
	if (tunnelp)
		*tunnelp = tunnel;

	/* If tunnel's socket was created by the kernel, it doesn't
	 * have a file.
	 */
	if (sock && sock->file)
		sockfd_put(sock);

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_create);

/* This function is used by the netlink TUNNEL_DELETE command.
 */
int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
	int err = 0;
	struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL;

	/* Force the tunnel socket to close. This will eventually
	 * cause the tunnel to be deleted via the normal socket close
	 * mechanisms when userspace closes the tunnel socket.
	 */
	if (sock != NULL) {
		err = inet_shutdown(sock, 2);	/* 2 == SHUT_RDWR: stop both directions */

		/* If the tunnel's socket was created by the kernel,
		 * close the socket here since the socket was not
		 * created by userspace.
		 */
		if (sock->file == NULL)
			err = inet_release(sock);
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
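
/* Usage note (illustrative sketch, not part of this driver): the netlink
 * TUNNEL_DELETE handler is expected to look the tunnel up by id and then
 * call l2tp_tunnel_delete(). l2tp_tunnel_find() is assumed here to be the
 * tunnel lookup helper provided earlier in this file.
 *
 *	struct l2tp_tunnel *tunnel = l2tp_tunnel_find(net, tunnel_id);
 *
 *	if (tunnel)
 *		l2tp_tunnel_delete(tunnel);
 */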

/* Really kill the session.
 */
void l2tp_session_free(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel;

	BUG_ON(atomic_read(&session->ref_count) != 0);

	tunnel = session->tunnel;
	if (tunnel != NULL) {
		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);

		/* Delete the session from the hash */
		write_lock_bh(&tunnel->hlist_lock);
		hlist_del_init(&session->hlist);
		write_unlock_bh(&tunnel->hlist_lock);

		/* Unlink from the global hash if not L2TPv2 */
		if (tunnel->version != L2TP_HDR_VER_2) {
			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

			spin_lock_bh(&pn->l2tp_session_hlist_lock);
			hlist_del_init_rcu(&session->global_hlist);
			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
			synchronize_rcu();
		}

		if (session->session_id != 0)
			atomic_dec(&l2tp_session_count);

		sock_put(tunnel->sock);

		/* This will delete the tunnel context if this
		 * is the last session on the tunnel.
		 */
		session->tunnel = NULL;
		l2tp_tunnel_dec_refcount(tunnel);
	}

	kfree(session);

	return;
}
EXPORT_SYMBOL_GPL(l2tp_session_free);

/* This function is used by the netlink SESSION_DELETE command and by
 * pseudowire modules.
 */
int l2tp_session_delete(struct l2tp_session *session)
{
	if (session->session_close != NULL)
		(*session->session_close)(session);

	l2tp_session_dec_refcount(session);

	return 0;
}
EXPORT_SYMBOL_GPL(l2tp_session_delete);
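
/* Usage note (illustrative sketch, not part of this driver): pseudowire
 * modules hook session teardown by installing a session_close callback on
 * the session before it is used; l2tp_session_delete() then invokes it as
 * shown above. my_pw_session_close() is a hypothetical callback named only
 * for illustration.
 *
 *	static void my_pw_session_close(struct l2tp_session *session)
 *	{
 *		... release pseudowire-specific state here ...
 *	}
 *	...
 *	session->session_close = my_pw_session_close;
 */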

/* We come here whenever a session's send_seq, cookie_len or
 * l2specific_len parameters are set.
 */
static void l2tp_session_set_header_len(struct l2tp_session *session, int version)
{
	if (version == L2TP_HDR_VER_2) {
		session->hdr_len = 6;
		if (session->send_seq)
			session->hdr_len += 4;
	} else {
		session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
		if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
			session->hdr_len += 4;
	}
}
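
/* Worked example (derived from the rules above; the parameter values are
 * assumed purely for illustration): an L2TPv2 session with sequencing
 * enabled uses a 6 + 4 = 10 byte header, while an L2TPv3 session over UDP
 * with an 8-byte cookie, a 4-byte default L2-specific sublayer and no
 * offset uses 4 + 8 + 4 + 0 + 4 = 20 bytes.
 */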

struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	struct l2tp_session *session;

	session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
	if (session != NULL) {
		session->magic = L2TP_SESSION_MAGIC;
		session->tunnel = tunnel;

		session->session_id = session_id;
		session->peer_session_id = peer_session_id;
		session->nr = 0;

		sprintf(&session->name[0], "sess %u/%u",
			tunnel->tunnel_id, session->session_id);

		skb_queue_head_init(&session->reorder_q);

		INIT_HLIST_NODE(&session->hlist);
		INIT_HLIST_NODE(&session->global_hlist);

		/* Inherit debug options from tunnel */
		session->debug = tunnel->debug;

		if (cfg) {
			session->pwtype = cfg->pw_type;
			session->debug = cfg->debug;
			session->mtu = cfg->mtu;
			session->mru = cfg->mru;
			session->send_seq = cfg->send_seq;
			session->recv_seq = cfg->recv_seq;
			session->lns_mode = cfg->lns_mode;
			session->reorder_timeout = cfg->reorder_timeout;
			session->offset = cfg->offset;
			session->l2specific_type = cfg->l2specific_type;
			session->l2specific_len = cfg->l2specific_len;
			session->cookie_len = cfg->cookie_len;
			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
			session->peer_cookie_len = cfg->peer_cookie_len;
			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
		}

		if (tunnel->version == L2TP_HDR_VER_2)
			session->build_header = l2tp_build_l2tpv2_header;
		else
			session->build_header = l2tp_build_l2tpv3_header;

		l2tp_session_set_header_len(session, tunnel->version);

		/* Bump the reference count. The session context is deleted
		 * only when this drops to zero.
		 */
		l2tp_session_inc_refcount(session);
		l2tp_tunnel_inc_refcount(tunnel);

		/* Ensure tunnel socket isn't deleted */
		sock_hold(tunnel->sock);

		/* Add session to the tunnel's hash list */
		write_lock_bh(&tunnel->hlist_lock);
		hlist_add_head(&session->hlist,
			       l2tp_session_id_hash(tunnel, session_id));
		write_unlock_bh(&tunnel->hlist_lock);

		/* And to the global session list if L2TPv3 */
		if (tunnel->version != L2TP_HDR_VER_2) {
			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

			spin_lock_bh(&pn->l2tp_session_hlist_lock);
			hlist_add_head_rcu(&session->global_hlist,
					   l2tp_session_id_hash_2(pn, session_id));
			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
		}

		/* Ignore management session in session count value */
		if (session->session_id != 0)
			atomic_inc(&l2tp_session_count);
	}

	return session;
}
EXPORT_SYMBOL_GPL(l2tp_session_create);
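
/* Usage note (illustrative sketch, not part of this driver): a pseudowire
 * module is expected to reserve its private data via priv_size and pass its
 * parameters through the cfg argument. struct my_pw_priv is a hypothetical
 * private structure, and the field values (including the L2TP_PWTYPE_ETH
 * pseudowire type from linux/l2tp.h) are assumptions chosen for
 * illustration.
 *
 *	struct l2tp_session_cfg scfg = {
 *		.pw_type = L2TP_PWTYPE_ETH,
 *		.mtu = 1462,
 *		.mru = 1462,
 *	};
 *	struct l2tp_session *session;
 *
 *	session = l2tp_session_create(sizeof(struct my_pw_priv), tunnel,
 *				      session_id, peer_session_id, &scfg);
 *	if (session == NULL)
 *		return -ENOMEM;
 */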

/*****************************************************************************
 * Init and cleanup
 *****************************************************************************/

static __net_init int l2tp_init_net(struct net *net)
{
	struct l2tp_net *pn = net_generic(net, l2tp_net_id);
	int hash;

	INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
	spin_lock_init(&pn->l2tp_tunnel_list_lock);

	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
		INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);

	spin_lock_init(&pn->l2tp_session_hlist_lock);

	return 0;
}

static struct pernet_operations l2tp_net_ops = {
	.init = l2tp_init_net,
	.id   = &l2tp_net_id,
	.size = sizeof(struct l2tp_net),
};

static int __init l2tp_init(void)
{
	int rc = 0;

	rc = register_pernet_device(&l2tp_net_ops);
	if (rc)
		goto out;

	pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);

out:
	return rc;
}

static void __exit l2tp_exit(void)
{
	unregister_pernet_device(&l2tp_net_ops);
}

module_init(l2tp_init);
module_exit(l2tp_exit);

MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP core");
MODULE_LICENSE("GPL");
MODULE_VERSION(L2TP_DRV_VERSION);