/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2002 International Business Machines, Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This abstraction represents an SCTP endpoint.
 *
 * The SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Jon Grimm <jgrimm@austin.ibm.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Dajiang Zhang <dajiang.zhang@nokia.com>
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/random.h>	/* get_random_bytes() */
#include <linux/crypto.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers. */
static void sctp_endpoint_bh_rcv(struct work_struct *work);
/*
 * Initialize the base fields of the endpoint structure.
*/
static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
                                                struct sock *sk,
                                                gfp_t gfp)
{
        struct net *net = sock_net(sk);
        struct sctp_hmac_algo_param *auth_hmacs = NULL;
        struct sctp_chunks_param *auth_chunks = NULL;
        struct sctp_shared_key *null_key;
        int err;
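        /* Scratch buffer for message digests; SCTP_SIGNATURE_SIZE is assumed
         * here to match the HMAC length used to sign and verify the state
         * cookie.
         */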
        ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
        if (!ep->digest)
                return NULL;
        ep->auth_enable = net->sctp.auth_enable;
        if (ep->auth_enable) {
                /* Allocate space for HMACS and CHUNKS authentication
                 * variables.  There are arrays that we encode directly
                 * into parameters to make the rest of the operations easier.
                 */
                auth_hmacs = kzalloc(sizeof(sctp_hmac_algo_param_t) +
                                     sizeof(__u16) * SCTP_AUTH_NUM_HMACS, gfp);
                if (!auth_hmacs)
                        goto nomem;

                auth_chunks = kzalloc(sizeof(sctp_chunks_param_t) +
                                      SCTP_NUM_CHUNK_TYPES, gfp);
                if (!auth_chunks)
                        goto nomem;
                /* Initialize the HMACS parameter.
                 * SCTP-AUTH: Section 3.3
                 *    Every endpoint supporting SCTP chunk authentication MUST
                 *    support the HMAC based on the SHA-1 algorithm.
                 */
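                /* The HMACS parameter body carries exactly one __u16 HMAC
                 * identifier (SHA-1), which is why the length below is the
                 * parameter header size plus two bytes.
                 */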
                auth_hmacs->param_hdr.type = SCTP_PARAM_HMAC_ALGO;
                auth_hmacs->param_hdr.length =
                                htons(sizeof(sctp_paramhdr_t) + 2);
                auth_hmacs->hmac_ids[0] = htons(SCTP_AUTH_HMAC_ID_SHA1);

                /* Initialize the CHUNKS parameter */
                auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS;
                auth_chunks->param_hdr.length = htons(sizeof(sctp_paramhdr_t));
                /* If the Add-IP functionality is enabled, we must
                 * authenticate ASCONF and ASCONF-ACK chunks
                 */
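                /* Each chunk type listed in the CHUNKS parameter occupies a
                 * single byte, so advertising ASCONF and ASCONF-ACK below
                 * grows the parameter length by two bytes.
                 */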
                if (net->sctp.addip_enable) {
                        auth_chunks->chunks[0] = SCTP_CID_ASCONF;
                        auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
                        auth_chunks->param_hdr.length =
                                        htons(sizeof(sctp_paramhdr_t) + 2);
                }
        }
        /* Initialize the base structure. */
        /* What type of endpoint are we? */
        ep->base.type = SCTP_EP_TYPE_SOCKET;

        /* Initialize the basic object fields. */
        atomic_set(&ep->base.refcnt, 1);
        ep->base.dead = false;

        /* Create an input queue. */
        sctp_inq_init(&ep->base.inqueue);

        /* Set its top-half handler */
        sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
        /* Initialize the bind addr area */
        sctp_bind_addr_init(&ep->base.bind_addr, 0);

        /* Remember who we are attached to. */
        ep->base.sk = sk;
        sock_hold(ep->base.sk);

        /* Create the lists of associations. */
        INIT_LIST_HEAD(&ep->asocs);

        /* Use SCTP specific send buffer space queues. */
        ep->sndbuf_policy = net->sctp.sndbuf_policy;

        sk->sk_data_ready = sctp_data_ready;
        sk->sk_write_space = sctp_write_space;
        sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

        /* Get the receive buffer policy for this endpoint */
        ep->rcvbuf_policy = net->sctp.rcvbuf_policy;

        /* Initialize the secret key used with cookie. */
        get_random_bytes(ep->secret_key, sizeof(ep->secret_key));

        /* SCTP-AUTH extensions */
        INIT_LIST_HEAD(&ep->endpoint_shared_keys);
        null_key = sctp_auth_shkey_create(0, gfp);
        if (!null_key)
                goto nomem;

        list_add(&null_key->key_list, &ep->endpoint_shared_keys);
        /* Allocate and initialize transforms arrays for supported HMACs. */
        err = sctp_auth_init_hmacs(ep, gfp);
        if (err)
                goto nomem_hmacs;

        /* Add the null key to the endpoint shared keys list and
         * set the hmacs and chunks pointers.
         */
        ep->auth_hmacs_list = auth_hmacs;
        ep->auth_chunk_list = auth_chunks;
        return ep;

nomem_hmacs:
        sctp_auth_destroy_keys(&ep->endpoint_shared_keys);
nomem:
        /* Free all allocations */
        kfree(auth_hmacs);
        kfree(auth_chunks);
        kfree(ep->digest);
        return NULL;
}
/* Create a sctp_endpoint with all that boring stuff initialized.
 * Returns NULL if there isn't enough memory.
*/
struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp)
{
        struct sctp_endpoint *ep;

        /* Build a local endpoint. */
        ep = kzalloc(sizeof(*ep), gfp);
        if (!ep)
                goto fail;

        if (!sctp_endpoint_init(ep, sk, gfp))
                goto fail_init;

        SCTP_DBG_OBJCNT_INC(ep);
        return ep;

fail_init:
        kfree(ep);
fail:
        return NULL;
}
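
/* A minimal usage sketch (this mirrors how the socket layer attaches an
 * endpoint when an SCTP socket is created; error handling abbreviated and
 * GFP_KERNEL assumed to be safe in that context):
 *
 *	struct sctp_endpoint *ep;
 *
 *	ep = sctp_endpoint_new(sk, GFP_KERNEL);
 *	if (!ep)
 *		return -ENOMEM;
 *	sctp_sk(sk)->ep = ep;
 *	...
 *	sctp_endpoint_free(ep);		// drops the initial reference
 */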
/* Add an association to an endpoint. */
void sctp_endpoint_add_asoc(struct sctp_endpoint *ep,
                            struct sctp_association *asoc)
{
        struct sock *sk = ep->base.sk;

        /* If this is a temporary association, don't bother
         * since we'll be removing it shortly and don't
         * want anyone to find it anyway.
         */
        if (asoc->temp)
                return;

        /* Now just add it to our list of asocs */
        list_add_tail(&asoc->asocs, &ep->asocs);

        /* Increment the backlog value for a TCP-style listening socket. */
        if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
                sk->sk_ack_backlog++;
}
/* Free the endpoint structure. Delay cleanup until
 * all users have released their reference count on this structure.
*/
void sctp_endpoint_free(struct sctp_endpoint *ep)
{
        ep->base.dead = true;

        ep->base.sk->sk_state = SCTP_SS_CLOSED;

        /* Unlink this endpoint, so we can't find it again! */
        sctp_unhash_endpoint(ep);

        sctp_endpoint_put(ep);
}
/* Final destructor for endpoint. */
static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
{
        struct sock *sk;
        if (unlikely(!ep->base.dead)) {
                WARN(1, "Attempt to destroy undead endpoint %p!\n", ep);
                return;
        }

        /* Free the digest buffer */
        kfree(ep->digest);
        /* SCTP-AUTH: Free up AUTH related data such as shared keys,
         * chunks and hmacs arrays that were allocated
         */
        sctp_auth_destroy_keys(&ep->endpoint_shared_keys);
        kfree(ep->auth_hmacs_list);
        kfree(ep->auth_chunk_list);

        /* AUTH - Free any allocated HMAC transform containers */
        sctp_auth_destroy_hmacs(ep->auth_hmacs);
        /* Cleanup. */
        sctp_inq_free(&ep->base.inqueue);
        sctp_bind_addr_free(&ep->base.bind_addr);

        memset(ep->secret_key, 0, sizeof(ep->secret_key));

        /* Give up our hold on the sock. */
        sk = ep->base.sk;
        if (sk != NULL) {
                /* Remove and free the port */
                if (sctp_sk(sk)->bind_hash)
                        sctp_put_port(sk);

                sock_put(sk);
        }

        kfree(ep);
        SCTP_DBG_OBJCNT_DEC(ep);
}
/* Hold a reference to an endpoint. */
void sctp_endpoint_hold(struct sctp_endpoint *ep)
{
        atomic_inc(&ep->base.refcnt);
}
/* Release a reference to an endpoint and clean up if there are
 * no more references.
*/
void sctp_endpoint_put(struct sctp_endpoint *ep)
{
        if (atomic_dec_and_test(&ep->base.refcnt))
                sctp_endpoint_destroy(ep);
}
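
/* The reference taken in sctp_endpoint_init() (refcnt set to 1) is the one
 * dropped by sctp_endpoint_free(); any additional sctp_endpoint_hold() must
 * be balanced by a matching sctp_endpoint_put(), and the final put invokes
 * sctp_endpoint_destroy() above.
 */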
/* Is this the endpoint we are looking for? */
struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep,
                                             struct net *net,
                                             const union sctp_addr *laddr)
{
        struct sctp_endpoint *retval = NULL;

        if ((htons(ep->base.bind_addr.port) == laddr->v4.sin_port) &&
            net_eq(sock_net(ep->base.sk), net)) {
                if (sctp_bind_addr_match(&ep->base.bind_addr, laddr,
                                         sctp_sk(ep->base.sk)))
                        retval = ep;
        }
        return retval;
}
/* Find the association that goes with this chunk.
 * We do a linear search of the associations for this endpoint.
 * We return the matching transport address too.
*/
static struct sctp_association *__sctp_endpoint_lookup_assoc(
        const struct sctp_endpoint *ep,
        const union sctp_addr *paddr,
        struct sctp_transport **transport)
{
        struct sctp_association *asoc = NULL;
        struct sctp_association *tmp;
        struct sctp_transport *t = NULL;
        struct sctp_hashbucket *head;
        struct sctp_ep_common *epb;
        int hash;
        int rport;

        *transport = NULL;

        /* If the local port is not set, there can't be any associations
         * on this endpoint.
         */
        if (!ep->base.bind_addr.port)
                goto out;

        rport = ntohs(paddr->v4.sin_port);

        hash = sctp_assoc_hashfn(sock_net(ep->base.sk), ep->base.bind_addr.port,
                                 rport);
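        /* Associations live in a global hash keyed on the endpoint's bound
         * port and the peer's port; entries sharing a bucket are filtered
         * below on the owning endpoint and the remote port before the peer
         * address itself is matched.
         */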
        head = &sctp_assoc_hashtable[hash];
        read_lock(&head->lock);
        sctp_for_each_hentry(epb, &head->chain) {
                tmp = sctp_assoc(epb);
                if (tmp->ep != ep || rport != tmp->peer.port)
                        continue;

                t = sctp_assoc_lookup_paddr(tmp, paddr);
                if (t) {
                        asoc = tmp;
                        *transport = t;
                        break;
                }
        }

        read_unlock(&head->lock);
out:
        return asoc;
}
/* Lookup association on an endpoint based on a peer address. BH-safe. */
struct sctp_association *sctp_endpoint_lookup_assoc(
        const struct sctp_endpoint *ep,
        const union sctp_addr *paddr,
        struct sctp_transport **transport)
{
        struct sctp_association *asoc;

        local_bh_disable();
        asoc = __sctp_endpoint_lookup_assoc(ep, paddr, transport);
        local_bh_enable();

        return asoc;
}
/* Look for any peeled off association from the endpoint that matches the
 * given peer address.
*/
int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
                                const union sctp_addr *paddr)
{
        struct sctp_sockaddr_entry *addr;
        struct sctp_bind_addr *bp;
        struct net *net = sock_net(ep->base.sk);

        bp = &ep->base.bind_addr;
        /* This function is called with the socket lock held,
         * so the address_list can not change.
         */
        list_for_each_entry(addr, &bp->address_list, list) {
                if (sctp_has_association(net, &addr->a, paddr))
                        return 1;
        }

        return 0;
}
/* Do delayed input processing. This is scheduled by sctp_rcv().
 * This may be called on BH or task time.
*/
static void sctp_endpoint_bh_rcv(struct work_struct *work)
{
        struct sctp_endpoint *ep =
                container_of(work, struct sctp_endpoint,
                             base.inqueue.immediate);
        struct sctp_association *asoc;
        struct sock *sk;
        struct net *net;
        struct sctp_transport *transport;
        struct sctp_chunk *chunk;
        struct sctp_inq *inqueue;
        sctp_subtype_t subtype;
        sctp_state_t state;
        int error = 0;
        int first_time = 1;	/* is this the first time through the loop */

        if (ep->base.dead)
                return;

        asoc = NULL;
        inqueue = &ep->base.inqueue;
        sk = ep->base.sk;
        net = sock_net(sk);

        while (NULL != (chunk = sctp_inq_pop(inqueue))) {
                subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
                /* If the first chunk in the packet is AUTH, do special
                 * processing specified in Section 6.3 of SCTP-AUTH spec
                 */
                if (first_time && (subtype.chunk == SCTP_CID_AUTH)) {
                        struct sctp_chunkhdr *next_hdr;

                        next_hdr = sctp_inq_peek(inqueue);
                        if (!next_hdr)
                                goto normal;

                        /* If the next chunk is COOKIE-ECHO, skip the AUTH
                         * chunk while saving a pointer to it so we can do
                         * Authentication later (during cookie-echo
                         * processing).
                         */
                        if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
                                chunk->auth_chunk = skb_clone(chunk->skb,
                                                              GFP_ATOMIC);
                                chunk->auth = 1;
                                continue;
                        }
                }

normal:
                /* We might have grown an association since last we
                 * looked, so try again.
                 *
                 * This happens when we've just processed our
                 * COOKIE-ECHO chunk.
                 */
                if (NULL == chunk->asoc) {
                        asoc = sctp_endpoint_lookup_assoc(ep,
                                                          sctp_source(chunk),
                                                          &transport);
                        chunk->asoc = asoc;
                        chunk->transport = transport;
                }

                state = asoc ? asoc->state : SCTP_STATE_CLOSED;
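                /* Chunk types that are required to arrive authenticated
                 * (SCTP-AUTH) are dropped here unless the chunk actually
                 * came in authenticated.
                 */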
                if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
                        continue;

                /* Remember where the last DATA chunk came from so we
                 * know where to send the SACK.
                 */
                if (asoc && sctp_chunk_is_data(chunk))
                        asoc->peer.last_data_from = chunk->transport;
                else {
                        SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS);
                        if (asoc)
                                asoc->stats.ictrlchunks++;
                }

                if (chunk->transport)
                        chunk->transport->last_time_heard = jiffies;
                error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state,
                                   ep, asoc, chunk, GFP_ATOMIC);

                if (error && chunk)
                        chunk->pdiscard = 1;

                /* Check to see if the endpoint is freed in response to
                 * the incoming chunk.  If so, get out of the while loop.
                 */
                if (!sctp_sk(sk)->ep)
                        break;

                if (first_time)
                        first_time = 0;
        }
}