/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
# include <linux/dccp.h>
2005-10-30 03:20:59 +03:00
# include <linux/kernel.h>
2005-08-10 07:14:34 +04:00
# include <linux/skbuff.h>
2005-12-27 07:43:12 +03:00
# include <net/inet_sock.h>
2005-08-10 07:14:34 +04:00
# include <net/sock.h>
2005-09-18 11:17:51 +04:00
# include "ackvec.h"
2005-08-10 07:14:34 +04:00
# include "ccid.h"
# include "dccp.h"
/* An Ack (pure or piggybacked) has just gone out: the delayed-Ack timer
 * is no longer needed, so cancel it. */
static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
2006-03-21 08:58:56 +03:00
static void dccp_skb_entail ( struct sock * sk , struct sk_buff * skb )
2005-10-30 03:20:59 +03:00
{
skb_set_owner_w ( skb , sk ) ;
WARN_ON ( sk - > sk_send_head ) ;
sk - > sk_send_head = skb ;
}
2005-08-10 07:14:34 +04:00
/*
* All SKB ' s seen here are completely headerless . It is our
* job to build the DCCP header , and pass the packet down to
* IP so it can do the same plus pass the packet off to the
* device .
*/
2005-10-30 03:20:59 +03:00
static int dccp_transmit_skb ( struct sock * sk , struct sk_buff * skb )
2005-08-10 07:14:34 +04:00
{
if ( likely ( skb ! = NULL ) ) {
const struct inet_sock * inet = inet_sk ( sk ) ;
2005-12-14 10:16:16 +03:00
const struct inet_connection_sock * icsk = inet_csk ( sk ) ;
2005-08-10 07:14:34 +04:00
struct dccp_sock * dp = dccp_sk ( sk ) ;
struct dccp_skb_cb * dcb = DCCP_SKB_CB ( skb ) ;
struct dccp_hdr * dh ;
/* XXX For now we're using only 48 bits sequence numbers */
2006-03-21 09:31:09 +03:00
const u32 dccp_header_size = sizeof ( * dh ) +
2005-08-10 07:14:34 +04:00
sizeof ( struct dccp_hdr_ext ) +
2005-08-14 03:34:54 +04:00
dccp_packet_hdr_len ( dcb - > dccpd_type ) ;
2005-08-10 07:14:34 +04:00
int err , set_ack = 1 ;
u64 ackno = dp - > dccps_gsr ;
dccp_inc_seqno ( & dp - > dccps_gss ) ;
switch ( dcb - > dccpd_type ) {
case DCCP_PKT_DATA :
set_ack = 0 ;
2005-10-30 03:20:59 +03:00
/* fall through */
case DCCP_PKT_DATAACK :
2007-09-26 18:30:02 +04:00
case DCCP_PKT_RESET :
2005-08-10 07:14:34 +04:00
break ;
2005-10-30 03:20:59 +03:00
[DCCP]: Initial feature negotiation implementation
Still needs more work, but boots and doesn't crashes, even
does some negotiation!
18:38:52.174934 127.0.0.1.43458 > 127.0.0.1.5001: request <change_l ack_ratio 2, change_r ccid 2, change_l ccid 2>
18:38:52.218526 127.0.0.1.5001 > 127.0.0.1.43458: response <nop, nop, change_l ack_ratio 2, confirm_r ccid 2 2, confirm_l ccid 2 2, confirm_r ack_ratio 2>
18:38:52.185398 127.0.0.1.43458 > 127.0.0.1.5001: <nop, confirm_r ack_ratio 2, ack_vector0 0x00, elapsed_time 212>
:-)
Signed-off-by: Andrea Bittau <a.bittau@cs.ucl.ac.uk>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2006-03-21 04:43:56 +03:00
case DCCP_PKT_REQUEST :
set_ack = 0 ;
/* fall through */
2005-08-10 07:14:34 +04:00
case DCCP_PKT_SYNC :
case DCCP_PKT_SYNCACK :
2007-09-26 09:42:27 +04:00
ackno = dcb - > dccpd_ack_seq ;
2005-10-30 03:20:59 +03:00
/* fall through */
default :
/*
2007-09-26 18:30:02 +04:00
* Set owner / destructor : some skbs are allocated via
* alloc_skb ( e . g . when retransmission may happen ) .
* Only Data , DataAck , and Reset packets should come
* through here with skb - > sk set .
2005-10-30 03:20:59 +03:00
*/
WARN_ON ( skb - > sk ) ;
skb_set_owner_w ( skb , sk ) ;
2005-08-10 07:14:34 +04:00
break ;
}
2005-08-21 12:40:16 +04:00
dcb - > dccpd_seq = dp - > dccps_gss ;
2006-03-21 09:32:06 +03:00
if ( dccp_insert_options ( sk , skb ) ) {
kfree_skb ( skb ) ;
return - EPROTO ;
}
2007-02-09 17:24:38 +03:00
2005-10-14 10:38:49 +04:00
2005-08-10 07:14:34 +04:00
/* Build DCCP header and checksum it. */
2006-11-10 16:22:32 +03:00
dh = dccp_zeroed_hdr ( skb , dccp_header_size ) ;
2005-08-10 07:14:34 +04:00
dh - > dccph_type = dcb - > dccpd_type ;
dh - > dccph_sport = inet - > sport ;
dh - > dccph_dport = inet - > dport ;
dh - > dccph_doff = ( dccp_header_size + dcb - > dccpd_opt_len ) / 4 ;
dh - > dccph_ccval = dcb - > dccpd_ccval ;
2006-11-10 22:43:06 +03:00
dh - > dccph_cscov = dp - > dccps_pcslen ;
2005-08-10 07:14:34 +04:00
/* XXX For now we're using only 48 bits sequence numbers */
dh - > dccph_x = 1 ;
dp - > dccps_awh = dp - > dccps_gss ;
dccp_hdr_set_seq ( dh , dp - > dccps_gss ) ;
if ( set_ack )
dccp_hdr_set_ack ( dccp_hdr_ack_bits ( skb ) , ackno ) ;
switch ( dcb - > dccpd_type ) {
case DCCP_PKT_REQUEST :
2005-08-14 03:34:54 +04:00
dccp_hdr_request ( skb ) - > dccph_req_service =
2005-09-17 03:58:40 +04:00
dp - > dccps_service ;
2005-08-10 07:14:34 +04:00
break ;
case DCCP_PKT_RESET :
2005-08-14 03:34:54 +04:00
dccp_hdr_reset ( skb ) - > dccph_reset_code =
dcb - > dccpd_reset_code ;
2005-08-10 07:14:34 +04:00
break ;
}
2006-11-10 22:43:06 +03:00
icsk - > icsk_af_ops - > send_check ( sk , 0 , skb ) ;
2005-08-10 07:14:34 +04:00
2005-08-24 08:50:06 +04:00
if ( set_ack )
2005-08-10 07:14:34 +04:00
dccp_event_ack_sent ( sk ) ;
DCCP_INC_STATS ( DCCP_MIB_OUTSEGS ) ;
2005-10-18 06:03:28 +04:00
memset ( & ( IPCB ( skb ) - > opt ) , 0 , sizeof ( IPCB ( skb ) - > opt ) ) ;
2007-01-26 12:04:55 +03:00
err = icsk - > icsk_af_ops - > queue_xmit ( skb , 0 ) ;
2006-11-14 16:21:36 +03:00
return net_xmit_eval ( err ) ;
2005-08-10 07:14:34 +04:00
}
return - ENOBUFS ;
}
/*
 * Recompute and cache the MSS for the current path MTU @pmtu, accounting
 * for the network header, the generic + extended DCCP header and a rough
 * estimate of per-packet option overhead.
 */
unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	int mss_now = (pmtu - icsk->icsk_af_ops->net_header_len -
		       sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext));

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/*
	 * FIXME: this should come from the CCID infrastructure, where, say,
	 * TFRC will say it wants TIMESTAMPS, ELAPSED time, etc, for now lets
	 * put a rough estimate for NDP + TIMESTAMP + TIMESTAMP_ECHO + ELAPSED
	 * TIME + TFRC_OPT_LOSS_EVENT_RATE + TFRC_OPT_RECEIVE_RATE + padding to
	 * make it a multiple of 4
	 */
	mss_now -= ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4;

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = mss_now;

	return mss_now;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);
2005-08-29 09:15:54 +04:00
void dccp_write_space ( struct sock * sk )
{
read_lock ( & sk - > sk_callback_lock ) ;
if ( sk - > sk_sleep & & waitqueue_active ( sk - > sk_sleep ) )
wake_up_interruptible ( sk - > sk_sleep ) ;
/* Should agree with poll, otherwise some programs break */
if ( sock_writeable ( sk ) )
sk_wake_async ( sk , 2 , POLL_OUT ) ;
read_unlock ( & sk - > sk_callback_lock ) ;
}
2005-08-27 10:06:35 +04:00
/**
* dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
* @ sk : socket to wait for
*/
2006-12-10 04:56:09 +03:00
static int dccp_wait_for_ccid ( struct sock * sk , struct sk_buff * skb )
2005-08-27 10:06:35 +04:00
{
struct dccp_sock * dp = dccp_sk ( sk ) ;
DEFINE_WAIT ( wait ) ;
2006-12-10 04:56:09 +03:00
unsigned long delay ;
2005-08-27 10:06:35 +04:00
int rc ;
while ( 1 ) {
prepare_to_wait ( sk - > sk_sleep , & wait , TASK_INTERRUPTIBLE ) ;
2006-08-27 06:16:45 +04:00
if ( sk - > sk_err )
2005-08-27 10:06:35 +04:00
goto do_error ;
if ( signal_pending ( current ) )
goto do_interrupted ;
2006-11-29 00:55:06 +03:00
rc = ccid_hc_tx_send_packet ( dp - > dccps_hc_tx_ccid , sk , skb ) ;
2005-08-27 10:06:35 +04:00
if ( rc < = 0 )
break ;
2007-03-20 21:00:28 +03:00
dccp_pr_debug ( " delayed send by %d msec \n " , rc ) ;
2005-08-27 10:06:35 +04:00
delay = msecs_to_jiffies ( rc ) ;
sk - > sk_write_pending + + ;
release_sock ( sk ) ;
2006-12-10 04:56:09 +03:00
schedule_timeout ( delay ) ;
2005-08-27 10:06:35 +04:00
lock_sock ( sk ) ;
sk - > sk_write_pending - - ;
}
out :
finish_wait ( sk - > sk_sleep , & wait ) ;
return rc ;
do_error :
rc = - EPIPE ;
goto out ;
do_interrupted :
2006-12-10 04:56:09 +03:00
rc = - EINTR ;
2005-08-27 10:06:35 +04:00
goto out ;
}
2006-08-27 06:16:45 +04:00
void dccp_write_xmit ( struct sock * sk , int block )
2005-08-10 07:30:56 +04:00
{
2006-08-27 06:16:45 +04:00
struct dccp_sock * dp = dccp_sk ( sk ) ;
struct sk_buff * skb ;
while ( ( skb = skb_peek ( & sk - > sk_write_queue ) ) ) {
2006-11-29 00:55:06 +03:00
int err = ccid_hc_tx_send_packet ( dp - > dccps_hc_tx_ccid , sk , skb ) ;
2005-08-27 10:06:35 +04:00
2006-08-27 06:16:45 +04:00
if ( err > 0 ) {
if ( ! block ) {
sk_reset_timer ( sk , & dp - > dccps_xmit_timer ,
msecs_to_jiffies ( err ) + jiffies ) ;
break ;
2006-12-10 04:56:09 +03:00
} else
err = dccp_wait_for_ccid ( sk , skb ) ;
2006-12-10 05:05:12 +03:00
if ( err & & err ! = - EINTR )
2006-11-20 23:39:23 +03:00
DCCP_BUG ( " err=%d after dccp_wait_for_ccid " , err ) ;
2006-08-27 06:16:45 +04:00
}
2005-08-10 07:30:56 +04:00
2006-08-27 06:16:45 +04:00
skb_dequeue ( & sk - > sk_write_queue ) ;
if ( err = = 0 ) {
struct dccp_skb_cb * dcb = DCCP_SKB_CB ( skb ) ;
const int len = skb - > len ;
2005-08-10 07:30:56 +04:00
2006-08-27 06:16:45 +04:00
if ( sk - > sk_state = = DCCP_PARTOPEN ) {
/* See 8.1.5. Handshake Completion */
inet_csk_schedule_ack ( sk ) ;
inet_csk_reset_xmit_timer ( sk , ICSK_TIME_DACK ,
2005-08-10 07:30:56 +04:00
inet_csk ( sk ) - > icsk_rto ,
DCCP_RTO_MAX ) ;
2006-08-27 06:16:45 +04:00
dcb - > dccpd_type = DCCP_PKT_DATAACK ;
} else if ( dccp_ack_pending ( sk ) )
dcb - > dccpd_type = DCCP_PKT_DATAACK ;
else
dcb - > dccpd_type = DCCP_PKT_DATA ;
err = dccp_transmit_skb ( sk , skb ) ;
ccid_hc_tx_packet_sent ( dp - > dccps_hc_tx_ccid , sk , 0 , len ) ;
2006-11-20 23:39:23 +03:00
if ( err )
DCCP_BUG ( " err=%d after ccid_hc_tx_packet_sent " ,
err ) ;
2006-12-10 05:05:12 +03:00
} else {
2007-03-20 21:00:28 +03:00
dccp_pr_debug ( " packet discarded due to err=%d \n " , err ) ;
2007-02-27 20:57:37 +03:00
kfree_skb ( skb ) ;
2006-12-10 05:05:12 +03:00
}
2006-08-27 06:16:45 +04:00
}
2005-08-10 07:30:56 +04:00
}
2005-08-10 07:14:34 +04:00
int dccp_retransmit_skb ( struct sock * sk , struct sk_buff * skb )
{
2005-12-14 10:16:16 +03:00
if ( inet_csk ( sk ) - > icsk_af_ops - > rebuild_header ( sk ) ! = 0 )
2005-08-10 07:14:34 +04:00
return - EHOSTUNREACH ; /* Routing failure or similar. */
return dccp_transmit_skb ( sk , ( skb_cloned ( skb ) ?
pskb_copy ( skb , GFP_ATOMIC ) :
skb_clone ( skb , GFP_ATOMIC ) ) ) ;
}
struct sk_buff * dccp_make_response ( struct sock * sk , struct dst_entry * dst ,
struct request_sock * req )
{
struct dccp_hdr * dh ;
2005-09-17 03:58:40 +04:00
struct dccp_request_sock * dreq ;
2006-03-21 09:31:09 +03:00
const u32 dccp_header_size = sizeof ( struct dccp_hdr ) +
2005-08-10 07:14:34 +04:00
sizeof ( struct dccp_hdr_ext ) +
sizeof ( struct dccp_hdr_response ) ;
2006-03-21 09:31:09 +03:00
struct sk_buff * skb = sock_wmalloc ( sk , sk - > sk_prot - > max_header , 1 ,
2005-08-10 07:14:34 +04:00
GFP_ATOMIC ) ;
if ( skb = = NULL )
return NULL ;
/* Reserve space for headers. */
2006-03-21 09:31:09 +03:00
skb_reserve ( skb , sk - > sk_prot - > max_header ) ;
2005-08-10 07:14:34 +04:00
skb - > dst = dst_clone ( dst ) ;
2005-09-17 03:58:40 +04:00
dreq = dccp_rsk ( req ) ;
2006-11-13 18:12:07 +03:00
if ( inet_rsk ( req ) - > acked ) /* increase ISS upon retransmission */
dccp_inc_seqno ( & dreq - > dreq_iss ) ;
2005-08-10 07:14:34 +04:00
DCCP_SKB_CB ( skb ) - > dccpd_type = DCCP_PKT_RESPONSE ;
2005-09-17 03:58:40 +04:00
DCCP_SKB_CB ( skb ) - > dccpd_seq = dreq - > dreq_iss ;
2006-03-21 09:32:06 +03:00
if ( dccp_insert_options ( sk , skb ) ) {
kfree_skb ( skb ) ;
return NULL ;
}
2005-08-10 07:14:34 +04:00
2006-11-14 17:57:34 +03:00
/* Build and checksum header */
2006-11-10 16:22:32 +03:00
dh = dccp_zeroed_hdr ( skb , dccp_header_size ) ;
2005-08-10 07:14:34 +04:00
dh - > dccph_sport = inet_sk ( sk ) - > sport ;
dh - > dccph_dport = inet_rsk ( req ) - > rmt_port ;
2005-08-14 03:34:54 +04:00
dh - > dccph_doff = ( dccp_header_size +
DCCP_SKB_CB ( skb ) - > dccpd_opt_len ) / 4 ;
2005-08-10 07:14:34 +04:00
dh - > dccph_type = DCCP_PKT_RESPONSE ;
dh - > dccph_x = 1 ;
2005-09-17 03:58:40 +04:00
dccp_hdr_set_seq ( dh , dreq - > dreq_iss ) ;
dccp_hdr_set_ack ( dccp_hdr_ack_bits ( skb ) , dreq - > dreq_isr ) ;
dccp_hdr_response ( skb ) - > dccph_resp_service = dreq - > dreq_service ;
2005-08-10 07:14:34 +04:00
2006-11-10 22:43:06 +03:00
dccp_csum_outgoing ( skb ) ;
2006-11-13 18:12:07 +03:00
/* We use `acked' to remember that a Response was already sent. */
inet_rsk ( req ) - > acked = 1 ;
2005-08-10 07:14:34 +04:00
DCCP_INC_STATS ( DCCP_MIB_OUTSEGS ) ;
return skb ;
}
2005-12-14 10:24:16 +03:00
EXPORT_SYMBOL_GPL ( dccp_make_response ) ;
/*
 * Resets generated from the control socket (rather than on an established
 * socket via dccp_send_reset()) will typically use the following codes
 * from RFC 4340: 3 "No Connection" (8.3.1), 4 "Packet Error", 5 "Option
 * Error", 6 "Mandatory Error", 7 "Connection Refused", 8 "Bad Service
 * Code", 9 "Too Busy" and 10 "Bad Init Cookie".  Codes relating to an
 * established connection - 1 "Closed", 2 "Aborted" and 11 "Aggression
 * Penalty" (12.3) - are sent via dccp_send_reset() instead.  The Data 1
 * ... Data 3 fields are filled in per RFC 4340, 5.6.
 */
/* answer offending packet in @rcv_skb with Reset from control socket @ctl */
struct sk_buff * dccp_ctl_make_reset ( struct socket * ctl , struct sk_buff * rcv_skb )
{
struct dccp_hdr * rxdh = dccp_hdr ( rcv_skb ) , * dh ;
struct dccp_skb_cb * dcb = DCCP_SKB_CB ( rcv_skb ) ;
const u32 dccp_hdr_reset_len = sizeof ( struct dccp_hdr ) +
sizeof ( struct dccp_hdr_ext ) +
sizeof ( struct dccp_hdr_reset ) ;
struct dccp_hdr_reset * dhr ;
struct sk_buff * skb ;
skb = alloc_skb ( ctl - > sk - > sk_prot - > max_header , GFP_ATOMIC ) ;
if ( skb = = NULL )
return NULL ;
skb_reserve ( skb , ctl - > sk - > sk_prot - > max_header ) ;
/* Swap the send and the receive. */
dh = dccp_zeroed_hdr ( skb , dccp_hdr_reset_len ) ;
dh - > dccph_type = DCCP_PKT_RESET ;
dh - > dccph_sport = rxdh - > dccph_dport ;
dh - > dccph_dport = rxdh - > dccph_sport ;
dh - > dccph_doff = dccp_hdr_reset_len / 4 ;
dh - > dccph_x = 1 ;
dhr = dccp_hdr_reset ( skb ) ;
dhr - > dccph_reset_code = dcb - > dccpd_reset_code ;
switch ( dcb - > dccpd_reset_code ) {
case DCCP_RESET_CODE_PACKET_ERROR :
dhr - > dccph_reset_data [ 0 ] = rxdh - > dccph_type ;
break ;
case DCCP_RESET_CODE_OPTION_ERROR : /* fall through */
case DCCP_RESET_CODE_MANDATORY_ERROR :
memcpy ( dhr - > dccph_reset_data , dcb - > dccpd_reset_data , 3 ) ;
break ;
}
/*
* From RFC 4340 , 8.3 .1 :
* If P . ackno exists , set R . seqno : = P . ackno + 1.
* Else set R . seqno : = 0.
*/
if ( dcb - > dccpd_ack_seq ! = DCCP_PKT_WITHOUT_ACK_SEQ )
dccp_hdr_set_seq ( dh , ADD48 ( dcb - > dccpd_ack_seq , 1 ) ) ;
dccp_hdr_set_ack ( dccp_hdr_ack_bits ( skb ) , dcb - > dccpd_seq ) ;
dccp_csum_outgoing ( skb ) ;
return skb ;
}
EXPORT_SYMBOL_GPL ( dccp_ctl_make_reset ) ;
2007-09-26 18:30:02 +04:00
/* send Reset on established socket, to close or abort the connection */
2006-03-21 06:25:24 +03:00
int dccp_send_reset ( struct sock * sk , enum dccp_reset_codes code )
{
2007-09-26 18:30:02 +04:00
struct sk_buff * skb ;
2006-03-21 06:25:24 +03:00
/*
* FIXME : what if rebuild_header fails ?
* Should we be doing a rebuild_header here ?
*/
int err = inet_sk_rebuild_header ( sk ) ;
2007-09-26 18:30:02 +04:00
if ( err ! = 0 )
return err ;
skb = sock_wmalloc ( sk , sk - > sk_prot - > max_header , 1 , GFP_ATOMIC ) ;
if ( skb = = NULL )
return - ENOBUFS ;
/* Reserve space for headers and prepare control bits. */
skb_reserve ( skb , sk - > sk_prot - > max_header ) ;
DCCP_SKB_CB ( skb ) - > dccpd_type = DCCP_PKT_RESET ;
DCCP_SKB_CB ( skb ) - > dccpd_reset_code = code ;
2006-03-21 06:25:24 +03:00
2007-09-26 18:30:02 +04:00
return dccp_transmit_skb ( sk , skb ) ;
2006-03-21 06:25:24 +03:00
}
2005-08-10 07:14:34 +04:00
/*
* Do all connect socket setups that can be done AF independent .
*/
static inline void dccp_connect_init ( struct sock * sk )
{
2005-12-14 10:24:16 +03:00
struct dccp_sock * dp = dccp_sk ( sk ) ;
2005-08-10 07:14:34 +04:00
struct dst_entry * dst = __sk_dst_get ( sk ) ;
struct inet_connection_sock * icsk = inet_csk ( sk ) ;
sk - > sk_err = 0 ;
sock_reset_flag ( sk , SOCK_DONE ) ;
2007-02-09 17:24:38 +03:00
2005-08-10 07:14:34 +04:00
dccp_sync_mss ( sk , dst_mtu ( dst ) ) ;
2006-12-10 21:01:18 +03:00
/*
2005-12-14 10:24:16 +03:00
* SWL and AWL are initially adjusted so that they are not less than
* the initial Sequence Numbers received and sent , respectively :
* SWL : = max ( GSR + 1 - floor ( W / 4 ) , ISR ) ,
* AWL : = max ( GSS - W ' + 1 , ISS ) .
* These adjustments MUST be applied only at the beginning of the
* connection .
2006-12-10 21:01:18 +03:00
*/
2006-11-13 18:34:38 +03:00
dccp_update_gss ( sk , dp - > dccps_iss ) ;
2005-12-14 10:24:16 +03:00
dccp_set_seqno ( & dp - > dccps_awl , max48 ( dp - > dccps_awl , dp - > dccps_iss ) ) ;
2005-08-10 07:14:34 +04:00
2006-11-13 18:34:38 +03:00
/* S.GAR - greatest valid acknowledgement number received on a non-Sync;
* initialized to S . ISS ( sec . 8.5 ) */
dp - > dccps_gar = dp - > dccps_iss ;
2005-08-10 07:14:34 +04:00
icsk - > icsk_retransmits = 0 ;
}
int dccp_connect ( struct sock * sk )
{
struct sk_buff * skb ;
struct inet_connection_sock * icsk = inet_csk ( sk ) ;
dccp_connect_init ( sk ) ;
2006-03-21 09:31:09 +03:00
skb = alloc_skb ( sk - > sk_prot - > max_header , sk - > sk_allocation ) ;
2005-08-10 07:14:34 +04:00
if ( unlikely ( skb = = NULL ) )
return - ENOBUFS ;
/* Reserve space for headers. */
2006-03-21 09:31:09 +03:00
skb_reserve ( skb , sk - > sk_prot - > max_header ) ;
2005-08-10 07:14:34 +04:00
DCCP_SKB_CB ( skb ) - > dccpd_type = DCCP_PKT_REQUEST ;
2005-10-30 03:20:59 +03:00
dccp_skb_entail ( sk , skb ) ;
2005-08-10 07:14:34 +04:00
dccp_transmit_skb ( sk , skb_clone ( skb , GFP_KERNEL ) ) ;
DCCP_INC_STATS ( DCCP_MIB_ACTIVEOPENS ) ;
/* Timer for repeating the REQUEST until an answer. */
2005-08-10 07:30:56 +04:00
inet_csk_reset_xmit_timer ( sk , ICSK_TIME_RETRANS ,
icsk - > icsk_rto , DCCP_RTO_MAX ) ;
2005-08-10 07:14:34 +04:00
return 0 ;
}
2005-12-14 10:24:16 +03:00
EXPORT_SYMBOL_GPL ( dccp_connect ) ;
2005-08-10 07:14:34 +04:00
void dccp_send_ack ( struct sock * sk )
{
/* If we have been reset, we may not send again. */
if ( sk - > sk_state ! = DCCP_CLOSED ) {
2006-03-21 09:31:09 +03:00
struct sk_buff * skb = alloc_skb ( sk - > sk_prot - > max_header ,
GFP_ATOMIC ) ;
2005-08-10 07:14:34 +04:00
if ( skb = = NULL ) {
inet_csk_schedule_ack ( sk ) ;
inet_csk ( sk ) - > icsk_ack . ato = TCP_ATO_MIN ;
2005-08-14 03:34:54 +04:00
inet_csk_reset_xmit_timer ( sk , ICSK_TIME_DACK ,
TCP_DELACK_MAX ,
DCCP_RTO_MAX ) ;
2005-08-10 07:14:34 +04:00
return ;
}
/* Reserve space for headers */
2006-03-21 09:31:09 +03:00
skb_reserve ( skb , sk - > sk_prot - > max_header ) ;
2005-08-10 07:14:34 +04:00
DCCP_SKB_CB ( skb ) - > dccpd_type = DCCP_PKT_ACK ;
dccp_transmit_skb ( sk , skb ) ;
}
}
EXPORT_SYMBOL_GPL ( dccp_send_ack ) ;
2007-09-26 18:26:04 +04:00
/* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
2005-08-10 07:14:34 +04:00
void dccp_send_delayed_ack ( struct sock * sk )
{
struct inet_connection_sock * icsk = inet_csk ( sk ) ;
/*
* FIXME : tune this timer . elapsed time fixes the skew , so no problem
* with using 2 s , and active senders also piggyback the ACK into a
* DATAACK packet , so this is really for quiescent senders .
*/
unsigned long timeout = jiffies + 2 * HZ ;
/* Use new timeout only if there wasn't a older one earlier. */
if ( icsk - > icsk_ack . pending & ICSK_ACK_TIMER ) {
/* If delack timer was blocked or is about to expire,
* send ACK now .
*
* FIXME : check the " about to expire " part
*/
if ( icsk - > icsk_ack . blocked ) {
dccp_send_ack ( sk ) ;
return ;
}
if ( ! time_before ( timeout , icsk - > icsk_ack . timeout ) )
timeout = icsk - > icsk_ack . timeout ;
}
icsk - > icsk_ack . pending | = ICSK_ACK_SCHED | ICSK_ACK_TIMER ;
icsk - > icsk_ack . timeout = timeout ;
sk_reset_timer ( sk , & icsk - > icsk_delack_timer , timeout ) ;
}
2007-09-26 09:42:27 +04:00
void dccp_send_sync ( struct sock * sk , const u64 ackno ,
2005-08-17 10:10:59 +04:00
const enum dccp_pkt_type pkt_type )
2005-08-10 07:14:34 +04:00
{
/*
* We are not putting this on the write queue , so
* dccp_transmit_skb ( ) will set the ownership to this
* sock .
*/
2006-03-21 09:31:09 +03:00
struct sk_buff * skb = alloc_skb ( sk - > sk_prot - > max_header , GFP_ATOMIC ) ;
2005-08-10 07:14:34 +04:00
2007-09-26 09:42:27 +04:00
if ( skb = = NULL ) {
2005-08-10 07:14:34 +04:00
/* FIXME: how to make sure the sync is sent? */
2007-09-26 09:42:27 +04:00
DCCP_CRIT ( " could not send %s " , dccp_packet_name ( pkt_type ) ) ;
2005-08-10 07:14:34 +04:00
return ;
2007-09-26 09:42:27 +04:00
}
2005-08-10 07:14:34 +04:00
/* Reserve space for headers and prepare control bits. */
2006-03-21 09:31:09 +03:00
skb_reserve ( skb , sk - > sk_prot - > max_header ) ;
2005-08-17 10:10:59 +04:00
DCCP_SKB_CB ( skb ) - > dccpd_type = pkt_type ;
2007-09-26 09:42:27 +04:00
DCCP_SKB_CB ( skb ) - > dccpd_ack_seq = ackno ;
2005-08-10 07:14:34 +04:00
dccp_transmit_skb ( sk , skb ) ;
}
2006-03-21 08:25:11 +03:00
EXPORT_SYMBOL_GPL ( dccp_send_sync ) ;
2005-08-14 03:34:54 +04:00
/*
* Send a DCCP_PKT_CLOSE / CLOSEREQ . The caller locks the socket for us . This
* cannot be allowed to fail queueing a DCCP_PKT_CLOSE / CLOSEREQ frame under
* any circumstances .
2005-08-10 07:14:34 +04:00
*/
2005-08-24 08:50:06 +04:00
void dccp_send_close ( struct sock * sk , const int active )
2005-08-10 07:14:34 +04:00
{
struct dccp_sock * dp = dccp_sk ( sk ) ;
struct sk_buff * skb ;
2005-10-21 11:20:43 +04:00
const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC ;
2005-08-10 07:14:34 +04:00
2005-08-24 08:50:06 +04:00
skb = alloc_skb ( sk - > sk_prot - > max_header , prio ) ;
if ( skb = = NULL )
return ;
2005-08-10 07:14:34 +04:00
/* Reserve space for headers and prepare control bits. */
skb_reserve ( skb , sk - > sk_prot - > max_header ) ;
2005-08-14 03:34:54 +04:00
DCCP_SKB_CB ( skb ) - > dccpd_type = dp - > dccps_role = = DCCP_ROLE_CLIENT ?
DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ ;
2005-08-10 07:14:34 +04:00
2005-08-24 08:50:06 +04:00
if ( active ) {
2006-08-27 06:16:45 +04:00
dccp_write_xmit ( sk , 1 ) ;
2005-10-30 03:20:59 +03:00
dccp_skb_entail ( sk , skb ) ;
2005-08-24 08:50:06 +04:00
dccp_transmit_skb ( sk , skb_clone ( skb , prio ) ) ;
2006-08-27 06:16:45 +04:00
/* FIXME do we need a retransmit timer here? */
2005-08-24 08:50:06 +04:00
} else
dccp_transmit_skb ( sk , skb ) ;
2005-08-10 07:14:34 +04:00
}