/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
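
/*
 * Take write ownership of @skb and remember it in sk_send_head so it can
 * be retransmitted later; the WARN_ON suggests DCCP keeps at most one such
 * packet (its Request or Close) queued at a time.
 */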
static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
}

/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		const struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					     dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;

		dccp_inc_seqno(&dp->dccps_gss);
		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
			break;
		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_seq;
			/* fall through */
		default:
			/*
			 * Only data packets should come through with skb->sk
			 * set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		dcb->dccpd_seq = dp->dccps_gss;

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

		/* Build DCCP header and checksum it. */
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type	= dcb->dccpd_type;
		dh->dccph_sport	= inet->sport;
		dh->dccph_dport	= inet->dport;
		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval	= dcb->dccpd_ccval;
		dh->dccph_cscov	= dp->dccps_pcslen;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x	= 1;

		dp->dccps_awh = dp->dccps_gss;
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, 0, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
		err = icsk->icsk_af_ops->queue_xmit(skb, 0);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}
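
/*
 * Recompute the maximum segment size from the current path MTU: strip the
 * network and fixed DCCP header sizes plus any extension-header overhead,
 * subtract a rough allowance for options, and cache the result.
 */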
unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	int mss_now = (pmtu - icsk->icsk_af_ops->net_header_len -
		       sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext));

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;
	/*
	 * FIXME: this should come from the CCID infrastructure, where, say,
	 * TFRC will say it wants TIMESTAMPS, ELAPSED time, etc, for now let's
	 * put a rough estimate for NDP + TIMESTAMP + TIMESTAMP_ECHO + ELAPSED
	 * TIME + TFRC_OPT_LOSS_EVENT_RATE + TFRC_OPT_RECEIVE_RATE + padding to
	 * make it a multiple of 4
	 */
	mss_now -= ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4;
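
	/*
	 * Rough worked example (illustrative only, assuming plain IPv4 with
	 * a 20-byte network header and no extension headers): a PMTU of 1500
	 * leaves 1500 - 20 - 12 - 4 = 1464 after the fixed headers, and the
	 * option allowance above is ((5+6+10+6+6+6+3)/4)*4 = 40, so an MSS
	 * of 1424 would be cached.
	 */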

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = mss_now;

	return mss_now;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

void dccp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	/* Should agree with poll, otherwise some programs break */
	if (sock_writeable(sk))
		sk_wake_async(sk, 2, POLL_OUT);

	read_unlock(&sk->sk_callback_lock);
}

/**
 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
 * @sk:  socket to wait for
 * @skb: current skb to pass on for waiting
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);
	DEFINE_WAIT(wait);
	unsigned long delay;
	int rc;

	while (1) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		if (sk->sk_err)
			goto do_error;
		if (signal_pending(current))
			goto do_interrupted;

		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
		if (rc <= 0)
			break;
		delay = msecs_to_jiffies(rc);
		sk->sk_write_pending++;
		release_sock(sk);
		schedule_timeout(delay);
		lock_sock(sk);
		sk->sk_write_pending--;
	}
out:
	finish_wait(sk->sk_sleep, &wait);
	return rc;

do_error:
	rc = -EPIPE;
	goto out;
do_interrupted:
	rc = -EINTR;
	goto out;
}
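
/*
 * Timer callback for delayed transmits: retries dccp_write_xmit() in
 * non-blocking mode, or backs off by one jiffy if a user context currently
 * owns the socket lock.
 */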
static void dccp_write_xmit_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct dccp_sock *dp = dccp_sk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies + 1);
	else
		dccp_write_xmit(sk, 0);
	bh_unlock_sock(sk);
	sock_put(sk);
}
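
/*
 * Flush the write queue, consulting the CCID before each packet: a positive
 * return value is a send delay in milliseconds, honoured either by sleeping
 * in dccp_wait_for_ccid() (@block != 0) or by arming dccps_xmit_timer; zero
 * means send now; a negative value discards the packet.
 */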
void dccp_write_xmit(struct sock *sk, int block)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		if (err > 0) {
			if (!block) {
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
						msecs_to_jiffies(err) + jiffies);
				break;
			} else
				err = dccp_wait_for_ccid(sk, skb);
			if (err && err != -EINTR)
				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
		}

		skb_dequeue(&sk->sk_write_queue);
		if (err == 0) {
			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
			const int len = skb->len;

			if (sk->sk_state == DCCP_PARTOPEN) {
				/* See 8.1.5. Handshake Completion */
				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						inet_csk(sk)->icsk_rto,
						DCCP_RTO_MAX);
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			} else if (dccp_ack_pending(sk))
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			else
				dcb->dccpd_type = DCCP_PKT_DATA;

			err = dccp_transmit_skb(sk, skb);
			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
			if (err)
				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
					 err);
		} else {
			dccp_pr_debug("packet discarded\n");
			kfree_skb(skb);
		}
	}
}

int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */

	/*
	 * A cloned skb shares its data, so take a private copy before the
	 * headers are rebuilt; otherwise a cheap clone is enough.
	 */
	return dccp_transmit_skb(sk, (skb_cloned(skb) ?
				      pskb_copy(skb, GFP_ATOMIC) :
				      skb_clone(skb, GFP_ATOMIC)));
}
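
/*
 * Build the server-side Response to a client Request; @req carries the
 * handshake state, since no full DCCP socket exists yet for this connection.
 */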
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb->dst = dst_clone(dst);

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)	/* increase ISS upon retransmission */
		dccp_inc_seqno(&dreq->dreq_iss);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;

	if (dccp_insert_options(sk, skb)) {
		kfree_skb(skb);
		return NULL;
	}

	/* Build and checksum header */
	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= inet_sk(sk)->sport;
	dh->dccph_dport	= inet_rsk(req)->rmt_port;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESPONSE;
	dh->dccph_x	= 1;
	dccp_hdr_set_seq(dh, dreq->dreq_iss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	/* We use `acked' to remember that a Response was already sent. */
	inet_rsk(req)->acked = 1;
	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
					const enum dccp_reset_codes code)
{
	struct dccp_hdr *dh;
	struct dccp_sock *dp = dccp_sk(sk);
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb->dst = dst_clone(dst);

	dccp_inc_seqno(&dp->dccps_gss);

	DCCP_SKB_CB(skb)->dccpd_reset_code = code;
	DCCP_SKB_CB(skb)->dccpd_type	   = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_seq	   = dp->dccps_gss;

	if (dccp_insert_options(sk, skb)) {
		kfree_skb(skb);
		return NULL;
	}

	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= inet_sk(sk)->sport;
	dh->dccph_dport	= inet_sk(sk)->dport;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_x	= 1;

	dccp_hdr_set_seq(dh, dp->dccps_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

	dccp_hdr_reset(skb)->dccph_reset_code = code;
	inet_csk(sk)->icsk_af_ops->send_check(sk, 0, skb);

	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}
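
/*
 * Build and transmit a Reset carrying @code, first rebuilding the socket's
 * routing header so the packet can still be routed.
 */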
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_sk_rebuild_header(sk);

	if (err == 0) {
		struct sk_buff *skb = dccp_make_reset(sk, sk->sk_dst_cache,
						      code);
		if (skb != NULL) {
			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
			err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, 0);
			return net_xmit_eval(err);
		}
	}

	return err;
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
static inline void dccp_connect_init(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/*
	 * SWL and AWL are initially adjusted so that they are not less than
	 * the initial Sequence Numbers received and sent, respectively:
	 *	SWL := max(GSR + 1 - floor(W/4), ISR),
	 *	AWL := max(GSS - W' + 1, ISS).
	 * These adjustments MUST be applied only at the beginning of the
	 * connection.
	 */
	dccp_update_gss(sk, dp->dccps_iss);
	dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));

	/*
	 * S.GAR - greatest valid acknowledgement number received on a non-Sync;
	 *	   initialized to S.ISS (sec. 8.5)
	 */
	dp->dccps_gar = dp->dccps_iss;

	icsk->icsk_retransmits = 0;
	init_timer(&dp->dccps_xmit_timer);
	dp->dccps_xmit_timer.data = (unsigned long)sk;
	dp->dccps_xmit_timer.function = dccp_write_xmit_timer;
}

int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct inet_connection_sock *icsk = inet_csk(sk);

	dccp_connect_init(sk);

	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

	dccp_skb_entail(sk, skb);
	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
						GFP_ATOMIC);

		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, sk->sk_prot->max_header);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiescent senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use the new timeout only if there wasn't an earlier one. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If the delack timer was blocked or is about to expire,
		 * send the ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}

	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
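
/*
 * Transmit a Sync or SyncAck (chosen via @pkt_type) that acknowledges
 * sequence number @seq.
 */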
void dccp_send_sync(struct sock *sk, const u64 seq,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL)
		/* FIXME: how to make sure the sync is sent? */
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
	DCCP_SKB_CB(skb)->dccpd_seq  = seq;

	dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
					DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;

	if (active) {
		dccp_write_xmit(sk, 1);
		dccp_skb_entail(sk, skb);
		dccp_transmit_skb(sk, skb_clone(skb, prio));
		/* FIXME do we need a retransmit timer here? */
	} else
		dccp_transmit_skb(sk, skb);
}