/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions manipulate sctp stream queue/scheduling.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */

#include <net/busy_poll.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/ulpevent.h>
#include <linux/sctp.h>
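
/* Build an I-DATA chunk with a zeroed-out sctp_idatahdr and room for
 * 'len' bytes of payload.  Only the stream id is filled in here; PPID,
 * FSN and MID are assigned later, once the whole message has been
 * fragmented (see sctp_chunk_assign_mid() below).
 */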
static struct sctp_chunk *sctp_make_idatafrag_empty(
                                        const struct sctp_association *asoc,
                                        const struct sctp_sndrcvinfo *sinfo,
                                        int len, __u8 flags, gfp_t gfp)
{
        struct sctp_chunk *retval;
        struct sctp_idatahdr dp;

        memset(&dp, 0, sizeof(dp));
        dp.stream = htons(sinfo->sinfo_stream);

        if (sinfo->sinfo_flags & SCTP_UNORDERED)
                flags |= SCTP_DATA_UNORDERED;

        retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
        if (!retval)
                return NULL;

        retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
        memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));

        return retval;
}
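
/* Walk all fragments of a message and assign the Message Identifier:
 * the FIRST frag carries the PPID in place of an FSN, every later frag
 * gets the next fragment sequence number, and the per-stream MID counter
 * is only advanced (next vs. peek) on the LAST frag so that all
 * fragments of one message share the same MID.
 */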
static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
{
        struct sctp_stream *stream;
        struct sctp_chunk *lchunk;
        __u32 cfsn = 0;
        __u16 sid;

        if (chunk->has_mid)
                return;

        sid = sctp_chunk_stream_no(chunk);
        stream = &chunk->asoc->stream;

        list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
                struct sctp_idatahdr *hdr;
                __u32 mid;

                lchunk->has_mid = 1;

                hdr = lchunk->subh.idata_hdr;

                if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
                        hdr->ppid = lchunk->sinfo.sinfo_ppid;
                else
                        hdr->fsn = htonl(cfsn++);

                if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
                        mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
                                sctp_mid_uo_next(stream, out, sid) :
                                sctp_mid_uo_peek(stream, out, sid);
                } else {
                        mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
                                sctp_mid_next(stream, out, sid) :
                                sctp_mid_peek(stream, out, sid);
                }
                hdr->mid = htonl(mid);
        }
}
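
/* Validate inbound (I-)DATA: the chunk type must match the data chunk
 * format this association negotiated, and an ordered chunk must not
 * carry an SSN/MID below the stream's next expected value.
 */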
static bool sctp_validate_data(struct sctp_chunk *chunk)
{
        const struct sctp_stream *stream;
        __u16 sid, ssn;

        if (chunk->chunk_hdr->type != SCTP_CID_DATA)
                return false;

        if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
                return true;

        stream = &chunk->asoc->stream;
        sid = sctp_chunk_stream_no(chunk);
        ssn = ntohs(chunk->subh.data_hdr->ssn);

        return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
}

static bool sctp_validate_idata(struct sctp_chunk *chunk)
{
        struct sctp_stream *stream;
        __u32 mid;
        __u16 sid;

        if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
                return false;

        if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
                return true;

        stream = &chunk->asoc->stream;
        sid = sctp_chunk_stream_no(chunk);
        mid = ntohl(chunk->subh.idata_hdr->mid);

        return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
}
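
/* Insert an event into the reassembly queue, keeping it sorted by
 * stream id, then MID, then FSN (FIRST frags ahead of later frags of
 * the same message).  The two peek-tail checks short-circuit the common
 * in-order arrival case before falling back to a full queue walk.
 */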
static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
                                  struct sctp_ulpevent *event)
{
        struct sctp_ulpevent *cevent;
        struct sk_buff *pos;

        pos = skb_peek_tail(&ulpq->reasm);
        if (!pos) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        cevent = sctp_skb2event(pos);

        if (event->stream == cevent->stream &&
            event->mid == cevent->mid &&
            (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
             (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
              event->fsn > cevent->fsn))) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        if ((event->stream == cevent->stream &&
             MID_lt(cevent->mid, event->mid)) ||
            event->stream > cevent->stream) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);

                if (event->stream < cevent->stream ||
                    (event->stream == cevent->stream &&
                     MID_lt(event->mid, cevent->mid)))
                        break;

                if (event->stream == cevent->stream &&
                    event->mid == cevent->mid &&
                    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
                    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
                     event->fsn < cevent->fsn))
                        break;
        }

        __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
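
/* Continue a message already being partially delivered on this stream:
 * collect the run of MIDDLE/LAST frags for sin->mid that resumes at
 * sin->fsn and hand it up; partial delivery mode ends once the LAST
 * frag completes the message.
 */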
static struct sctp_ulpevent *sctp_intl_retrieve_partial(
                                                struct sctp_ulpq *ulpq,
                                                struct sctp_ulpevent *event)
{
        struct sk_buff *first_frag = NULL;
        struct sk_buff *last_frag = NULL;
        struct sctp_ulpevent *retval;
        struct sctp_stream_in *sin;
        struct sk_buff *pos;
        __u32 next_fsn = 0;
        int is_last = 0;

        sin = sctp_stream_in(ulpq->asoc, event->stream);

        skb_queue_walk(&ulpq->reasm, pos) {
                struct sctp_ulpevent *cevent = sctp_skb2event(pos);

                if (cevent->stream < event->stream)
                        continue;

                if (cevent->stream > event->stream ||
                    cevent->mid != sin->mid)
                        break;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        goto out;
                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag) {
                                if (cevent->fsn == sin->fsn) {
                                        first_frag = pos;
                                        last_frag = pos;
                                        next_fsn = cevent->fsn + 1;
                                }
                        } else if (cevent->fsn == next_fsn) {
                                last_frag = pos;
                                next_fsn++;
                        } else {
                                goto out;
                        }
                        break;
                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag) {
                                if (cevent->fsn == sin->fsn) {
                                        first_frag = pos;
                                        last_frag = pos;
                                        next_fsn = 0;
                                        is_last = 1;
                                }
                        } else if (cevent->fsn == next_fsn) {
                                last_frag = pos;
                                next_fsn = 0;
                                is_last = 1;
                        }
                        goto out;
                default:
                        goto out;
                }
        }

out:
        if (!first_frag)
                return NULL;

        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                             &ulpq->reasm, first_frag,
                                             last_frag);
        if (retval) {
                sin->fsn = next_fsn;
                if (is_last) {
                        retval->msg_flags |= MSG_EOR;
                        sin->pd_mode = 0;
                }
        }

        return retval;
}
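
/* Look for a complete FIRST..LAST frag run for this event's MID.  If
 * none exists but the frags queued for the next expected MID (sin->mid)
 * already exceed the socket's partial delivery point, deliver what is
 * there and enter partial delivery mode rather than hold the data back.
 */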
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
                                                struct sctp_ulpq *ulpq,
                                                struct sctp_ulpevent *event)
{
        struct sctp_association *asoc = ulpq->asoc;
        struct sk_buff *pos, *first_frag = NULL;
        struct sctp_ulpevent *retval = NULL;
        struct sk_buff *pd_first = NULL;
        struct sk_buff *pd_last = NULL;
        struct sctp_stream_in *sin;
        __u32 next_fsn = 0;
        __u32 pd_point = 0;
        __u32 pd_len = 0;
        __u32 mid = 0;

        sin = sctp_stream_in(ulpq->asoc, event->stream);

        skb_queue_walk(&ulpq->reasm, pos) {
                struct sctp_ulpevent *cevent = sctp_skb2event(pos);

                if (cevent->stream < event->stream)
                        continue;
                if (cevent->stream > event->stream)
                        break;

                if (MID_lt(cevent->mid, event->mid))
                        continue;
                if (MID_lt(event->mid, cevent->mid))
                        break;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (cevent->mid == sin->mid) {
                                pd_first = pos;
                                pd_last = pos;
                                pd_len = pos->len;
                        }

                        first_frag = pos;
                        next_fsn = 0;
                        mid = cevent->mid;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if (first_frag && cevent->mid == mid &&
                            cevent->fsn == next_fsn) {
                                next_fsn++;
                                if (pd_first) {
                                        pd_last = pos;
                                        pd_len += pos->len;
                                }
                        } else {
                                first_frag = NULL;
                        }
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (first_frag && cevent->mid == mid &&
                            cevent->fsn == next_fsn)
                                goto found;
                        else
                                first_frag = NULL;
                        break;
                }
        }

        if (!pd_first)
                goto out;

        pd_point = sctp_sk(asoc->base.sk)->pd_point;
        if (pd_point && pd_point <= pd_len) {
                retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
                                                     &ulpq->reasm,
                                                     pd_first, pd_last);
                if (retval) {
                        sin->fsn = next_fsn;
                        sin->pd_mode = 1;
                }
        }
        goto out;

found:
        retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
                                             &ulpq->reasm,
                                             first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;

out:
        return retval;
}
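
/* Reassembly entry for ordered I-DATA: unfragmented events pass through
 * with MSG_EOR set, fragments are queued and then either extend an
 * in-progress partial delivery or trigger a fresh reassembly attempt.
 */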
static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
{
        struct sctp_ulpevent *retval = NULL;
        struct sctp_stream_in *sin;

        if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
                event->msg_flags |= MSG_EOR;
                return event;
        }

        sctp_intl_store_reasm(ulpq, event);

        sin = sctp_stream_in(ulpq->asoc, event->stream);
        if (sin->pd_mode && event->mid == sin->mid &&
            event->fsn == sin->fsn)
                retval = sctp_intl_retrieve_partial(ulpq, event);

        if (!retval)
                retval = sctp_intl_retrieve_reassembled(ulpq, event);

        return retval;
}
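
/* The lobby holds fully reassembled but out-of-order messages.  The
 * three helpers below insert into it sorted by stream id and MID, drain
 * every message that becomes deliverable when a stream's expected MID
 * advances, and decide whether an event can be delivered immediately or
 * must wait its turn.
 */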
static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
                                    struct sctp_ulpevent *event)
{
        struct sctp_ulpevent *cevent;
        struct sk_buff *pos;

        pos = skb_peek_tail(&ulpq->lobby);
        if (!pos) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        cevent = (struct sctp_ulpevent *)pos->cb;
        if (event->stream == cevent->stream &&
            MID_lt(cevent->mid, event->mid)) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        if (event->stream > cevent->stream) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        skb_queue_walk(&ulpq->lobby, pos) {
                cevent = (struct sctp_ulpevent *)pos->cb;

                if (cevent->stream > event->stream)
                        break;

                if (cevent->stream == event->stream &&
                    MID_lt(event->mid, cevent->mid))
                        break;
        }

        __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
                                       struct sctp_ulpevent *event)
{
        struct sk_buff_head *event_list;
        struct sctp_stream *stream;
        struct sk_buff *pos, *tmp;
        __u16 sid = event->stream;

        stream = &ulpq->asoc->stream;
        event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;

        sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
                struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;

                if (cevent->stream > sid)
                        break;

                if (cevent->stream < sid)
                        continue;

                if (cevent->mid != sctp_mid_peek(stream, in, sid))
                        break;

                sctp_mid_next(stream, in, sid);

                __skb_unlink(pos, &ulpq->lobby);

                __skb_queue_tail(event_list, pos);
        }
}

static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
{
        struct sctp_stream *stream;
        __u16 sid;

        stream = &ulpq->asoc->stream;
        sid = event->stream;

        if (event->mid != sctp_mid_peek(stream, in, sid)) {
                sctp_intl_store_ordered(ulpq, event);
                return NULL;
        }

        sctp_mid_next(stream, in, sid);

        sctp_intl_retrieve_ordered(ulpq, event);

        return event;
}
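
/* Final hand-off to the socket: discard events a shut-down receiver can
 * no longer accept, honour the subscriber's event mask, splice any
 * pending event list onto sk_receive_queue, and signal data_ready only
 * once until the reader catches up.
 */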
static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
                              struct sctp_ulpevent *event)
{
        struct sk_buff *skb = sctp_event2skb(event);
        struct sock *sk = ulpq->asoc->base.sk;
        struct sctp_sock *sp = sctp_sk(sk);
        struct sk_buff_head *skb_list;

        skb_list = (struct sk_buff_head *)skb->prev;

        if (sk->sk_shutdown & RCV_SHUTDOWN &&
            (sk->sk_shutdown & SEND_SHUTDOWN ||
             !sctp_ulpevent_is_notification(event)))
                goto out_free;

        if (!sctp_ulpevent_is_notification(event)) {
                sk_mark_napi_id(sk, skb);
                sk_incoming_cpu_update(sk);
        }

        if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
                goto out_free;

        if (skb_list)
                skb_queue_splice_tail_init(skb_list,
                                           &sk->sk_receive_queue);
        else
                __skb_queue_tail(&sk->sk_receive_queue, skb);

        if (!sp->data_ready_signalled) {
                sp->data_ready_signalled = 1;
                sk->sk_data_ready(sk);
        }

        return 1;

out_free:
        if (skb_list)
                sctp_queue_purge_ulpevents(skb_list);
        else
                sctp_ulpevent_free(event);

        return 0;
}
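
/* The *_uo helpers below mirror the ordered reassembly path for
 * unordered I-DATA.  Unordered messages bypass the lobby entirely; the
 * per-stream mid_uo/fsn_uo/pd_mode_uo fields track the single unordered
 * message currently being partially delivered on a stream.
 */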
static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
                                     struct sctp_ulpevent *event)
{
        struct sctp_ulpevent *cevent;
        struct sk_buff *pos;

        pos = skb_peek_tail(&ulpq->reasm_uo);
        if (!pos) {
                __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
                return;
        }

        cevent = sctp_skb2event(pos);

        if (event->stream == cevent->stream &&
            event->mid == cevent->mid &&
            (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
             (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
              event->fsn > cevent->fsn))) {
                __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
                return;
        }

        if ((event->stream == cevent->stream &&
             MID_lt(cevent->mid, event->mid)) ||
            event->stream > cevent->stream) {
                __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
                return;
        }

        skb_queue_walk(&ulpq->reasm_uo, pos) {
                cevent = sctp_skb2event(pos);

                if (event->stream < cevent->stream ||
                    (event->stream == cevent->stream &&
                     MID_lt(event->mid, cevent->mid)))
                        break;

                if (event->stream == cevent->stream &&
                    event->mid == cevent->mid &&
                    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
                    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
                     event->fsn < cevent->fsn))
                        break;
        }

        __skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
                                                struct sctp_ulpq *ulpq,
                                                struct sctp_ulpevent *event)
{
        struct sk_buff *first_frag = NULL;
        struct sk_buff *last_frag = NULL;
        struct sctp_ulpevent *retval;
        struct sctp_stream_in *sin;
        struct sk_buff *pos;
        __u32 next_fsn = 0;
        int is_last = 0;

        sin = sctp_stream_in(ulpq->asoc, event->stream);

        skb_queue_walk(&ulpq->reasm_uo, pos) {
                struct sctp_ulpevent *cevent = sctp_skb2event(pos);

                if (cevent->stream < event->stream)
                        continue;
                if (cevent->stream > event->stream)
                        break;

                if (MID_lt(cevent->mid, sin->mid_uo))
                        continue;
                if (MID_lt(sin->mid_uo, cevent->mid))
                        break;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        goto out;
                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag) {
                                if (cevent->fsn == sin->fsn_uo) {
                                        first_frag = pos;
                                        last_frag = pos;
                                        next_fsn = cevent->fsn + 1;
                                }
                        } else if (cevent->fsn == next_fsn) {
                                last_frag = pos;
                                next_fsn++;
                        } else {
                                goto out;
                        }
                        break;
                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag) {
                                if (cevent->fsn == sin->fsn_uo) {
                                        first_frag = pos;
                                        last_frag = pos;
                                        next_fsn = 0;
                                        is_last = 1;
                                }
                        } else if (cevent->fsn == next_fsn) {
                                last_frag = pos;
                                next_fsn = 0;
                                is_last = 1;
                        }
                        goto out;
                default:
                        goto out;
                }
        }

out:
        if (!first_frag)
                return NULL;

        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                             &ulpq->reasm_uo, first_frag,
                                             last_frag);
        if (retval) {
                sin->fsn_uo = next_fsn;
                if (is_last) {
                        retval->msg_flags |= MSG_EOR;
                        sin->pd_mode_uo = 0;
                }
        }

        return retval;
}

static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
                                                struct sctp_ulpq *ulpq,
                                                struct sctp_ulpevent *event)
{
        struct sctp_association *asoc = ulpq->asoc;
        struct sk_buff *pos, *first_frag = NULL;
        struct sctp_ulpevent *retval = NULL;
        struct sk_buff *pd_first = NULL;
        struct sk_buff *pd_last = NULL;
        struct sctp_stream_in *sin;
        __u32 next_fsn = 0;
        __u32 pd_point = 0;
        __u32 pd_len = 0;
        __u32 mid = 0;

        sin = sctp_stream_in(ulpq->asoc, event->stream);

        skb_queue_walk(&ulpq->reasm_uo, pos) {
                struct sctp_ulpevent *cevent = sctp_skb2event(pos);

                if (cevent->stream < event->stream)
                        continue;
                if (cevent->stream > event->stream)
                        break;

                if (MID_lt(cevent->mid, event->mid))
                        continue;
                if (MID_lt(event->mid, cevent->mid))
                        break;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (!sin->pd_mode_uo) {
                                sin->mid_uo = cevent->mid;
                                pd_first = pos;
                                pd_last = pos;
                                pd_len = pos->len;
                        }

                        first_frag = pos;
                        next_fsn = 0;
                        mid = cevent->mid;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if (first_frag && cevent->mid == mid &&
                            cevent->fsn == next_fsn) {
                                next_fsn++;
                                if (pd_first) {
                                        pd_last = pos;
                                        pd_len += pos->len;
                                }
                        } else {
                                first_frag = NULL;
                        }
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (first_frag && cevent->mid == mid &&
                            cevent->fsn == next_fsn)
                                goto found;
                        else
                                first_frag = NULL;
                        break;
                }
        }

        if (!pd_first)
                goto out;

        pd_point = sctp_sk(asoc->base.sk)->pd_point;
        if (pd_point && pd_point <= pd_len) {
                retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
                                                     &ulpq->reasm_uo,
                                                     pd_first, pd_last);
                if (retval) {
                        sin->fsn_uo = next_fsn;
                        sin->pd_mode_uo = 1;
                }
        }
        goto out;

found:
        retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
                                             &ulpq->reasm_uo,
                                             first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;

out:
        return retval;
}

static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
                                                struct sctp_ulpevent *event)
{
        struct sctp_ulpevent *retval = NULL;
        struct sctp_stream_in *sin;

        if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
                event->msg_flags |= MSG_EOR;
                return event;
        }

        sctp_intl_store_reasm_uo(ulpq, event);

        sin = sctp_stream_in(ulpq->asoc, event->stream);
        if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
            event->fsn == sin->fsn_uo)
                retval = sctp_intl_retrieve_partial_uo(ulpq, event);

        if (!retval)
                retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);

        return retval;
}
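
/* Forced partial delivery (see sctp_intl_start_pd()): pick the first
 * stream that is not already in unordered PD mode and has a FIRST frag
 * queued, push up its leading run of in-sequence fragments, and put the
 * stream into unordered PD mode.
 */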
static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
{
        struct sctp_stream_in *csin, *sin = NULL;
        struct sk_buff *first_frag = NULL;
        struct sk_buff *last_frag = NULL;
        struct sctp_ulpevent *retval;
        struct sk_buff *pos;
        __u32 next_fsn = 0;
        __u16 sid = 0;

        skb_queue_walk(&ulpq->reasm_uo, pos) {
                struct sctp_ulpevent *cevent = sctp_skb2event(pos);

                csin = sctp_stream_in(ulpq->asoc, cevent->stream);
                if (csin->pd_mode_uo)
                        continue;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (first_frag)
                                goto out;
                        first_frag = pos;
                        last_frag = pos;
                        next_fsn = 0;
                        sin = csin;
                        sid = cevent->stream;
                        sin->mid_uo = cevent->mid;
                        break;
                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag)
                                break;
                        if (cevent->stream == sid &&
                            cevent->mid == sin->mid_uo &&
                            cevent->fsn == next_fsn) {
                                next_fsn++;
                                last_frag = pos;
                        } else {
                                goto out;
                        }
                        break;
                case SCTP_DATA_LAST_FRAG:
                        if (first_frag)
                                goto out;
                        break;
                default:
                        break;
                }
        }

        if (!first_frag)
                return NULL;

out:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                             &ulpq->reasm_uo, first_frag,
                                             last_frag);
        if (retval) {
                sin->fsn_uo = next_fsn;
                sin->pd_mode_uo = 1;
        }

        return retval;
}
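
/* ulpevent_data op for I-DATA: decode MID and PPID/FSN from the chunk,
 * run the event through the unordered or ordered reassembly path, and
 * enqueue whatever becomes deliverable.  Returns 1 if a complete message
 * (MSG_EOR) was delivered, 0 otherwise, or -ENOMEM on allocation failure.
 */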
static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
                               struct sctp_chunk *chunk, gfp_t gfp)
{
        struct sctp_ulpevent *event;
        struct sk_buff_head temp;
        int event_eor = 0;

        event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
        if (!event)
                return -ENOMEM;

        event->mid = ntohl(chunk->subh.idata_hdr->mid);
        if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
                event->ppid = chunk->subh.idata_hdr->ppid;
        else
                event->fsn = ntohl(chunk->subh.idata_hdr->fsn);

        if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
                event = sctp_intl_reasm(ulpq, event);
                if (event && event->msg_flags & MSG_EOR) {
                        skb_queue_head_init(&temp);
                        __skb_queue_tail(&temp, sctp_event2skb(event));

                        event = sctp_intl_order(ulpq, event);
                }
        } else {
                event = sctp_intl_reasm_uo(ulpq, event);
        }

        if (event) {
                event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
                sctp_enqueue_event(ulpq, event);
        }

        return event_eor;
}
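
/* Ordered counterpart of sctp_intl_retrieve_first_uo(): only a FIRST
 * frag whose MID is the stream's next expected MID may start forced
 * partial delivery, so ordering survives even under memory pressure.
 */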
static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
{
        struct sctp_stream_in *csin, *sin = NULL;
        struct sk_buff *first_frag = NULL;
        struct sk_buff *last_frag = NULL;
        struct sctp_ulpevent *retval;
        struct sk_buff *pos;
        __u32 next_fsn = 0;
        __u16 sid = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                struct sctp_ulpevent *cevent = sctp_skb2event(pos);

                csin = sctp_stream_in(ulpq->asoc, cevent->stream);
                if (csin->pd_mode)
                        continue;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (first_frag)
                                goto out;
                        if (cevent->mid == csin->mid) {
                                first_frag = pos;
                                last_frag = pos;
                                next_fsn = 0;
                                sin = csin;
                                sid = cevent->stream;
                        }
                        break;
                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag)
                                break;
                        if (cevent->stream == sid &&
                            cevent->mid == sin->mid &&
                            cevent->fsn == next_fsn) {
                                next_fsn++;
                                last_frag = pos;
                        } else {
                                goto out;
                        }
                        break;
                case SCTP_DATA_LAST_FRAG:
                        if (first_frag)
                                goto out;
                        break;
                default:
                        break;
                }
        }

        if (!first_frag)
                return NULL;

out:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                             &ulpq->reasm, first_frag,
                                             last_frag);
        if (retval) {
                sin->fsn = next_fsn;
                sin->pd_mode = 1;
        }

        return retval;
}
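
/* start_pd op: drain both reassembly queues through forced partial
 * delivery, repeating until no further event can be produced.
 */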
static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
        struct sctp_ulpevent *event;

        if (!skb_queue_empty(&ulpq->reasm)) {
                do {
                        event = sctp_intl_retrieve_first(ulpq);
                        if (event)
                                sctp_enqueue_event(ulpq, event);
                } while (event);
        }

        if (!skb_queue_empty(&ulpq->reasm_uo)) {
                do {
                        event = sctp_intl_retrieve_first_uo(ulpq);
                        if (event)
                                sctp_enqueue_event(ulpq, event);
                } while (event);
        }
}
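
/* renege_events op: under memory pressure, purge enough queued events
 * (lobby first, then both reassembly queues) to make room for the newly
 * arrived chunk, then process it; if that still yields no complete
 * message, fall back to partial delivery.
 */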
static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                               gfp_t gfp)
{
        struct sctp_association *asoc = ulpq->asoc;
        __u32 freed = 0;
        __u16 needed;

        if (chunk) {
                needed = ntohs(chunk->chunk_hdr->length);
                needed -= sizeof(struct sctp_idata_chunk);
        } else {
                needed = SCTP_DEFAULT_MAXWINDOW;
        }

        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
                if (freed < needed)
                        freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
                                                       needed);
                if (freed < needed)
                        freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
                                                       needed);
        }

        if (chunk && freed >= needed)
                if (sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
                        sctp_intl_start_pd(ulpq, gfp);

        sk_mem_reclaim(asoc->base.sk);
}
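
/* Tell the user that partial delivery on (sid, mid) was aborted by
 * queueing a SCTP_PARTIAL_DELIVERY_ABORTED event, if the socket
 * subscribed to partial delivery events.
 */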
static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
                                      __u32 mid, __u16 flags, gfp_t gfp)
{
        struct sock *sk = ulpq->asoc->base.sk;
        struct sctp_ulpevent *ev = NULL;

        if (!sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
                                        &sctp_sk(sk)->subscribe))
                return;

        ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
                                      sid, mid, flags, gfp);
        if (ev) {
                __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

                if (!sctp_sk(sk)->data_ready_signalled) {
                        sctp_sk(sk)->data_ready_signalled = 1;
                        sk->sk_data_ready(sk);
                }
        }
}
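
/* After an ordered PD abort has skipped past a MID, deliver from the
 * lobby every message on the stream whose MID is now below the expected
 * value, plus the next expected message if it is already queued, so the
 * stream does not stall.
 */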
static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
        struct sctp_stream *stream = &ulpq->asoc->stream;
        struct sctp_ulpevent *cevent, *event = NULL;
        struct sk_buff_head *lobby = &ulpq->lobby;
        struct sk_buff *pos, *tmp;
        struct sk_buff_head temp;
        __u16 csid;
        __u32 cmid;

        skb_queue_head_init(&temp);

        sctp_skb_for_each(pos, lobby, tmp) {
                cevent = (struct sctp_ulpevent *)pos->cb;
                csid = cevent->stream;
                cmid = cevent->mid;

                if (csid > sid)
                        break;

                if (csid < sid)
                        continue;

                if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
                        break;

                __skb_unlink(pos, lobby);
                if (!event)
                        event = sctp_skb2event(pos);

                __skb_queue_tail(&temp, pos);
        }

        if (!event && pos != (struct sk_buff *)lobby) {
                cevent = (struct sctp_ulpevent *)pos->cb;
                csid = cevent->stream;
                cmid = cevent->mid;

                if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
                        sctp_mid_next(stream, in, csid);
                        __skb_unlink(pos, lobby);
                        __skb_queue_tail(&temp, pos);
                        event = sctp_skb2event(pos);
                }
        }

        if (event) {
                sctp_intl_retrieve_ordered(ulpq, event);
                sctp_enqueue_event(ulpq, event);
        }
}
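
/* abort_pd op: abort any ordered and unordered partial delivery on every
 * stream, skip the aborted MIDs, reap what the lobby can still deliver,
 * and finally flush the queues.
 */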
static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
        struct sctp_stream *stream = &ulpq->asoc->stream;
        __u16 sid;

        for (sid = 0; sid < stream->incnt; sid++) {
                struct sctp_stream_in *sin = &stream->in[sid];
                __u32 mid;

                if (sin->pd_mode_uo) {
                        sin->pd_mode_uo = 0;

                        mid = sin->mid_uo;
                        sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
                }

                if (sin->pd_mode) {
                        sin->pd_mode = 0;

                        mid = sin->mid;
                        sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
                        sctp_mid_skip(stream, in, sid, mid);

                        sctp_intl_reap_ordered(ulpq, sid);
                }
        }

        /* intl abort pd happens only when all data needs to be cleaned */
        sctp_ulpq_flush(ulpq);
}
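
/* Two wirings of the stream interleave operations: interleave_0 keeps
 * the classic DATA/SSN code paths, interleave_1 uses the I-DATA/MID
 * paths defined above (user message interleaving, as in RFC 8260).
 * sctp_stream_interleave_init() picks one per association, based on
 * whether interleaving was negotiated.
 */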
static struct sctp_stream_interleave sctp_stream_interleave_0 = {
        .data_chunk_len         = sizeof(struct sctp_data_chunk),
        /* DATA process functions */
        .make_datafrag          = sctp_make_datafrag_empty,
        .assign_number          = sctp_chunk_assign_ssn,
        .validate_data          = sctp_validate_data,
        .ulpevent_data          = sctp_ulpq_tail_data,
        .enqueue_event          = sctp_ulpq_tail_event,
        .renege_events          = sctp_ulpq_renege,
        .start_pd               = sctp_ulpq_partial_delivery,
        .abort_pd               = sctp_ulpq_abort_pd,
};

static struct sctp_stream_interleave sctp_stream_interleave_1 = {
        .data_chunk_len         = sizeof(struct sctp_idata_chunk),
        /* I-DATA process functions */
        .make_datafrag          = sctp_make_idatafrag_empty,
        .assign_number          = sctp_chunk_assign_mid,
        .validate_data          = sctp_validate_idata,
        .ulpevent_data          = sctp_ulpevent_idata,
        .enqueue_event          = sctp_enqueue_event,
        .renege_events          = sctp_renege_events,
        .start_pd               = sctp_intl_start_pd,
        .abort_pd               = sctp_intl_abort_pd,
};

void sctp_stream_interleave_init(struct sctp_stream *stream)
{
        struct sctp_association *asoc;

        asoc = container_of(stream, struct sctp_association, stream);
        stream->si = asoc->intl_enable ? &sctp_stream_interleave_1
                                       : &sctp_stream_interleave_0;
}