/*
 * L2TPv3 IP encapsulation support
 *
 * Copyright (c) 2008, 2009, 2010 Katalix Systems Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/ioctls.h>
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>

#include "l2tp_core.h"

struct l2tp_ip_sock {
	/* inet_sock has to be the first member of l2tp_ip_sock */
	struct inet_sock	inet;

	u32			conn_id;
	u32			peer_conn_id;
};

static DEFINE_RWLOCK(l2tp_ip_lock);
static struct hlist_head l2tp_ip_table;
static struct hlist_head l2tp_ip_bind_table;

static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
{
	return (struct l2tp_ip_sock *)sk;
}

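/* Find a bound socket matching the given local/remote address pair, bound
 * device and tunnel (connection) ID. A zero address or device index acts
 * as a wildcard. The caller must hold l2tp_ip_lock.
 */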
static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
					  __be32 raddr, int dif, u32 tunnel_id)
{
	struct sock *sk;

	sk_for_each_bound(sk, &l2tp_ip_bind_table) {
		const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
		const struct inet_sock *inet = inet_sk(sk);

		if (!net_eq(sock_net(sk), net))
			continue;

		if (sk->sk_bound_dev_if && dif && sk->sk_bound_dev_if != dif)
			continue;

		if (inet->inet_rcv_saddr && laddr &&
		    inet->inet_rcv_saddr != laddr)
			continue;

		if (inet->inet_daddr && raddr && inet->inet_daddr != raddr)
			continue;

		if (l2tp->conn_id != tunnel_id)
			continue;

		goto found;
	}

	sk = NULL;
found:
	return sk;
}

/* When processing receive frames, there are two cases to
 * consider. Data frames consist of a non-zero session-id and an
 * optional cookie. Control frames consist of a regular L2TP header
 * preceded by 32-bits of zeros.
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Control Message Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      (32 bits of zeros)                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Control Connection ID                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Ns              |               Nr              |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * All control frames are passed to userspace.
 */
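/* For illustration only (a hypothetical userspace sketch, not part of this
 * module): a control-plane daemon would typically reach this receive path
 * with an L2TP/IP socket created and bound roughly as follows; the
 * connection ID value is an example:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
 *	struct sockaddr_l2tpip sa = {
 *		.l2tp_family	= AF_INET,
 *		.l2tp_addr	= { .s_addr = htonl(INADDR_ANY) },
 *		.l2tp_conn_id	= 42,	// example tunnel/connection ID
 *	};
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * Control frames (zero session ID) are then delivered to that socket via
 * recvmsg().
 */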
static int l2tp_ip_recv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct sock *sk;
	u32 session_id;
	u32 tunnel_id;
	unsigned char *ptr, *optr;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel = NULL;
	int length;

	if (!pskb_may_pull(skb, 4))
		goto discard;

	/* Point to L2TP header */
	optr = ptr = skb->data;
	session_id = ntohl(*((__be32 *)ptr));
	ptr += 4;

	/* RFC3931: L2TP/IP packets have the first 4 bytes containing
	 * the session_id. If it is 0, the packet is a L2TP control
	 * frame and the session_id value can be discarded.
	 */
	if (session_id == 0) {
		__skb_pull(skb, 4);
		goto pass_up;
	}

	/* Ok, this is a data packet. Lookup the session. */
	session = l2tp_session_get(net, NULL, session_id, true);
	if (!session)
		goto discard;

	tunnel = session->tunnel;
	if (!tunnel)
		goto discard_sess;

	/* Trace packet contents, if enabled */
	if (tunnel->debug & L2TP_MSG_DATA) {
		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto discard_sess;

		/* Point to L2TP header */
		optr = ptr = skb->data;
		ptr += 4;
		pr_debug("%s: ip recv\n", tunnel->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
	}

	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
			 tunnel->recv_payload_hook);
	l2tp_session_dec_refcount(session);

	return 0;

pass_up:
	/* Get the tunnel_id from the L2TP header */
	if (!pskb_may_pull(skb, 12))
		goto discard;

	if ((skb->data[0] & 0xc0) != 0xc0)
		goto discard;

	tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (tunnel) {
		sk = tunnel->sock;
		sock_hold(sk);
	} else {
		struct iphdr *iph = (struct iphdr *)skb_network_header(skb);

		read_lock_bh(&l2tp_ip_lock);
		sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
					   inet_iif(skb), tunnel_id);
		if (!sk) {
			read_unlock_bh(&l2tp_ip_lock);
			goto discard;
		}

		sock_hold(sk);
		read_unlock_bh(&l2tp_ip_lock);
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_put;

	nf_reset(skb);

	return sk_receive_skb(sk, skb, 1);

discard_sess:
	if (session->deref)
		session->deref(session);
	l2tp_session_dec_refcount(session);
	goto discard;

discard_put:
	sock_put(sk);

discard:
	kfree_skb(skb);
	return 0;
}

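/* Socket .init handler: set the protocol number as the "port" so that
 * autobind never kicks in (L2TP/IP has no ports), and track the socket in
 * the global l2tp_ip_table.
 */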
static int l2tp_ip_open(struct sock *sk)
{
	/* Prevent autobind. We don't have ports. */
	inet_sk(sk)->inet_num = IPPROTO_L2TP;

	write_lock_bh(&l2tp_ip_lock);
	sk_add_node(sk, &l2tp_ip_table);
	write_unlock_bh(&l2tp_ip_lock);

	return 0;
}

static void l2tp_ip_close(struct sock *sk, long timeout)
{
	write_lock_bh(&l2tp_ip_lock);
	hlist_del_init(&sk->sk_bind_node);
	sk_del_node_init(sk);
	write_unlock_bh(&l2tp_ip_lock);

	sk_common_release(sk);
}

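/* Socket .destroy handler: drop any queued transmit skbs and close the
 * tunnel (if any) still attached to this socket.
 */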
static void l2tp_ip_destroy_sock(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	if (tunnel) {
		l2tp_tunnel_closeall(tunnel);
		sock_put(sk);
	}

	sk_refcnt_debug_dec(sk);
}

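/* Bind the socket to a local address and L2TP connection ID. The bind
 * fails with -EADDRINUSE if another socket in l2tp_ip_bind_table already
 * holds the same address/device/connection-ID tuple.
 */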
static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *)uaddr;
	struct net *net = sock_net(sk);
	int ret;
	int chk_addr_ret;

	if (addr_len < sizeof(struct sockaddr_l2tpip))
		return -EINVAL;

	if (addr->l2tp_family != AF_INET)
		return -EINVAL;

	lock_sock(sk);
	ret = -EINVAL;
	if (!sock_flag(sk, SOCK_ZAPPED))
		goto out;

	if (sk->sk_state != TCP_CLOSE)
		goto out;

	chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
	ret = -EADDRNOTAVAIL;
	if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
	    chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
		goto out;

	if (addr->l2tp_addr.s_addr)
		inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;

	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
		inet->inet_saddr = 0;  /* Use device */

	write_lock_bh(&l2tp_ip_lock);
	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
				  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
		write_unlock_bh(&l2tp_ip_lock);
		ret = -EADDRINUSE;
		goto out;
	}

	sk_dst_reset(sk);
	l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;

	sk_add_bind_node(sk, &l2tp_ip_bind_table);
	sk_del_node_init(sk);
	write_unlock_bh(&l2tp_ip_lock);

	ret = 0;
	sock_reset_flag(sk, SOCK_ZAPPED);

out:
	release_sock(sk);

	return ret;
}

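/* Connect the socket to a remote address and record the peer connection
 * ID. The socket must already be bound (SOCK_ZAPPED clear); on success it
 * is re-inserted into l2tp_ip_bind_table so lookups see the peer address.
 */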
static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
	int rc;

	if (addr_len < sizeof(*lsa))
		return -EINVAL;

	if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
		return -EINVAL;

	lock_sock(sk);

	/* Must bind first - autobinding does not work */
	if (sock_flag(sk, SOCK_ZAPPED)) {
		rc = -EINVAL;
		goto out_sk;
	}

	rc = __ip4_datagram_connect(sk, uaddr, addr_len);
	if (rc < 0)
		goto out_sk;

	l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;

	write_lock_bh(&l2tp_ip_lock);
	hlist_del_init(&sk->sk_bind_node);
	sk_add_bind_node(sk, &l2tp_ip_bind_table);
	write_unlock_bh(&l2tp_ip_lock);

out_sk:
	release_sock(sk);

	return rc;
}

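/* Disconnect is a no-op for sockets that were never bound (SOCK_ZAPPED
 * still set); otherwise __udp_disconnect() tears down the connected state.
 */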
static int l2tp_ip_disconnect(struct sock *sk, int flags)
{
	if (sock_flag(sk, SOCK_ZAPPED))
		return 0;

	return __udp_disconnect(sk, flags);
}

static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
			   int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;

	memset(lsa, 0, sizeof(*lsa));
	lsa->l2tp_family = AF_INET;
	if (peer) {
		if (!inet->inet_dport)
			return -ENOTCONN;
		lsa->l2tp_conn_id = lsk->peer_conn_id;
		lsa->l2tp_addr.s_addr = inet->inet_daddr;
	} else {
		__be32 addr = inet->inet_rcv_saddr;

		if (!addr)
			addr = inet->inet_saddr;
		lsa->l2tp_conn_id = lsk->conn_id;
		lsa->l2tp_addr.s_addr = addr;
	}
	*uaddr_len = sizeof(*lsa);
	return 0;
}

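/* Receive handler invoked via sk_receive_skb(): queue the packet on the
 * socket's receive queue, accounting a discard if that fails.
 */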
static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	/* Charge it to the socket, dropping if the queue is full. */
	rc = sock_queue_rcv_skb(sk, skb);
	if (rc < 0)
		goto drop;

	return 0;

drop:
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
	kfree_skb(skb);
	return 0;
}

/* Userspace will call sendmsg() on the tunnel socket to send L2TP
 * control frames.
 */
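/* For illustration only (a hypothetical userspace sketch, not part of this
 * module): once the socket has been bound and connected, a daemon can send
 * a control message with a plain send(); the zero session ID prefix is
 * added by the kernel below:
 *
 *	send(fd, ctrl_buf, ctrl_len, 0);
 */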
static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int rc;
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = NULL;
	struct flowi4 *fl4;
	int connected = 0;
	__be32 daddr;

	lock_sock(sk);

	rc = -ENOTCONN;
	if (sock_flag(sk, SOCK_DEAD))
		goto out;

	/* Get and verify the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name);

		rc = -EINVAL;
		if (msg->msg_namelen < sizeof(*lip))
			goto out;

		if (lip->l2tp_family != AF_INET) {
			rc = -EAFNOSUPPORT;
			if (lip->l2tp_family != AF_UNSPEC)
				goto out;
		}

		daddr = lip->l2tp_addr.s_addr;
	} else {
		rc = -EDESTADDRREQ;
		if (sk->sk_state != TCP_ESTABLISHED)
			goto out;

		daddr = inet->inet_daddr;
		connected = 1;
	}

	/* Allocate a socket buffer */
	rc = -ENOMEM;
	skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
			   4 + len, 0, GFP_KERNEL);
	if (!skb)
		goto error;

	/* Reserve space for headers, putting IP header on 4-byte boundary. */
	skb_reserve(skb, 2 + NET_SKB_PAD);
	skb_reset_network_header(skb);
	skb_reserve(skb, sizeof(struct iphdr));
	skb_reset_transport_header(skb);

	/* Insert 0 session_id */
	*((__be32 *)skb_put(skb, 4)) = 0;

	/* Copy user data into skb */
	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc < 0) {
		kfree_skb(skb);
		goto error;
	}

	fl4 = &inet->cork.fl.u.ip4;
	if (connected)
		rt = (struct rtable *)__sk_dst_check(sk, 0);

	rcu_read_lock();
	if (rt == NULL) {
		const struct ip_options_rcu *inet_opt;

		inet_opt = rcu_dereference(inet->inet_opt);

		/* Use correct destination address if we have options. */
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, retransmit mechanism of transport layer will
		 * keep trying until route appears or the connection times
		 * itself out.
		 */
		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport, inet->inet_sport,
					   sk->sk_protocol, RT_CONN_FLAGS(sk),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		if (connected) {
			sk_setup_caps(sk, &rt->dst);
		} else {
			skb_dst_set(skb, &rt->dst);
			goto xmit;
		}
	}

	/* We dont need to clone dst here, it is guaranteed to not disappear.
	 * __dev_xmit_skb() might force a refcount if needed.
	 */
	skb_dst_set_noref(skb, &rt->dst);

xmit:
	/* Queue the packet to IP for output */
	rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
	rcu_read_unlock();

error:
	if (rc >= 0)
		rc = len;

out:
	release_sock(sk);
	return rc;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	rc = -EHOSTUNREACH;
	goto out;
}

static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
			   size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	size_t copied = 0;
	int err = -EOPNOTSUPP;
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;

	if (flags & MSG_OOB)
		goto out;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto done;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		sin->sin_port = 0;
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv(msg, skb);
	if (flags & MSG_TRUNC)
		copied = skb->len;
done:
	skb_free_datagram(sk, skb);
out:
	return err ? err : copied;
}

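/* Handle SIOCOUTQ/SIOCINQ queue-size queries for L2TP sockets. Exported
 * for use outside this file.
 */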
int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct sk_buff *skb;
	int amount;

	switch (cmd) {
	case SIOCOUTQ:
		amount = sk_wmem_alloc_get(sk);
		break;
	case SIOCINQ:
		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		amount = skb ? skb->len : 0;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		break;

	default:
		return -ENOIOCTLCMD;
	}

	return put_user(amount, (int __user *)arg);
}
EXPORT_SYMBOL(l2tp_ioctl);

static struct proto l2tp_ip_prot = {
	.name		= "L2TP/IP",
	.owner		= THIS_MODULE,
	.init		= l2tp_ip_open,
	.close		= l2tp_ip_close,
	.bind		= l2tp_ip_bind,
	.connect	= l2tp_ip_connect,
	.disconnect	= l2tp_ip_disconnect,
	.ioctl		= l2tp_ioctl,
	.destroy	= l2tp_ip_destroy_sock,
	.setsockopt	= ip_setsockopt,
	.getsockopt	= ip_getsockopt,
	.sendmsg	= l2tp_ip_sendmsg,
	.recvmsg	= l2tp_ip_recvmsg,
	.backlog_rcv	= l2tp_ip_backlog_recv,
	.hash		= inet_hash,
	.unhash		= inet_unhash,
	.obj_size	= sizeof(struct l2tp_ip_sock),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};

static const struct proto_ops l2tp_ip_ops = {
	.family		= PF_INET,
	.owner		= THIS_MODULE,
	.release	= inet_release,
	.bind		= inet_bind,
	.connect	= inet_dgram_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= l2tp_ip_getname,
	.poll		= datagram_poll,
	.ioctl		= inet_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= inet_shutdown,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
	.sendmsg	= inet_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw l2tp_ip_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_L2TP,
	.prot		= &l2tp_ip_prot,
	.ops		= &l2tp_ip_ops,
};

static struct net_protocol l2tp_ip_protocol __read_mostly = {
	.handler	= l2tp_ip_recv,
	.netns_ok	= 1,
};

static int __init l2tp_ip_init(void)
{
	int err;

	pr_info("L2TP IP encapsulation support (L2TPv3)\n");

	err = proto_register(&l2tp_ip_prot, 1);
	if (err != 0)
		goto out;

	err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
	if (err)
		goto out1;

	inet_register_protosw(&l2tp_ip_protosw);
	return 0;

out1:
	proto_unregister(&l2tp_ip_prot);
out:
	return err;
}

static void __exit l2tp_ip_exit(void)
{
	inet_unregister_protosw(&l2tp_ip_protosw);
	inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
	proto_unregister(&l2tp_ip_prot);
}

module_init(l2tp_ip_init);
module_exit(l2tp_ip_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP over IP");
MODULE_VERSION("1.0");

/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
 * enums
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
MODULE_ALIAS_NET_PF_PROTO(PF_INET, IPPROTO_L2TP);