/*
 * TCP Westwood+: end-to-end bandwidth estimation for TCP
 *
 *      Angelo Dell'Aera: author of the first version of TCP Westwood+ in Linux 2.4
 *
 * Support at http://c3lab.poliba.it/index.php/Westwood
 * Main references in literature:
 *
 * - Mascolo S, Casetti, M. Gerla et al.
 *   "TCP Westwood: bandwidth estimation for TCP" Proc. ACM Mobicom 2001
 *
 * - A. Grieco, S. Mascolo
 *   "Performance evaluation of New Reno, Vegas, Westwood+ TCP" ACM Computer
 *   Comm. Review, 2004
 *
 * - A. Dell'Aera, L. Grieco, S. Mascolo.
 *   "Linux 2.4 Implementation of Westwood+ TCP with Rate-Halving:
 *    A Performance Evaluation Over the Internet" (ICC 2004), Paris, June 2004
 *
 * Westwood+ employs end-to-end bandwidth measurement to set cwnd and
 * ssthresh after packet loss. The probing phase is the same as in the
 * original Reno.
 */

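/*
 * Rough sketch of how the pieces below fit together: acked bytes are
 * accumulated in bk; roughly once per RTT, bk/delta is fed through a
 * low-pass filter to obtain bw_est; tcp_westwood_event() then uses
 * tcp_westwood_bw_rttmin() (bw_est * rtt_min / mss) to set cwnd and
 * ssthresh on CA_EVENT_COMPLETE_CWR, and ssthresh alone on
 * CA_EVENT_LOSS.
 */
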
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

/* TCP Westwood structure */
struct westwood {
	u32    bw_ns_est;        /* first bandwidth estimation..not too smoothed 8) */
	u32    bw_est;           /* bandwidth estimate */
	u32    rtt_win_sx;       /* here starts a new evaluation... */
	u32    bk;               /* bytes acked in the current sampling window */
	u32    snd_una;          /* used for evaluating the number of acked bytes */
	u32    cumul_ack;        /* bytes credited to bk by the last ack */
	u32    accounted;        /* bytes already credited via dupacks */
	u32    rtt;              /* last RTT sample (jiffies) */
	u32    rtt_min;          /* minimum observed RTT */
	u8     first_ack;        /* flag indicating this is the first ack */
	u8     reset_rtt_min;    /* reset RTT min to the next RTT sample */
};

/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN   (HZ/20)	/* 50ms */
#define TCP_WESTWOOD_INIT_RTT  (20*HZ)	/* maybe too conservative?! */

/*
 * @tcp_westwood_init
 * This function initializes fields used in TCP Westwood+.
 * It is called after the initial SYN, so the sequence numbers
 * are correct, but for new passive connections we have no
 * information about RTTmin at this time, so we simply set it to
 * TCP_WESTWOOD_INIT_RTT. This value was deliberately chosen to be
 * overly conservative so that we are sure it will be updated in a
 * consistent way as soon as possible. That should reasonably happen
 * within the first RTT of the connection's lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);

	w->bk = 0;
	w->bw_ns_est = 0;
	w->bw_est = 0;
	w->accounted = 0;
	w->cumul_ack = 0;
	w->reset_rtt_min = 1;
	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
	w->rtt_win_sx = tcp_time_stamp;
	w->snd_una = tcp_sk(sk)->snd_una;
	w->first_ack = 1;
}

/*
 * @westwood_do_filter
 * Low-pass filter. Implemented using constant coefficients.
 */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
	return ((7 * a) + b) >> 3;
}

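/*
 * For example, with a = 80 and b = 160 this returns
 * ((7 * 80) + 160) >> 3 = 90: an exponentially weighted moving average
 * with gain 1/8, essentially the same form the kernel uses to smooth srtt.
 */
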
static void westwood_filter(struct westwood *w, u32 delta)
{
	/* If the filter is empty fill it with the first sample of bandwidth */
	if (w->bw_ns_est == 0 && w->bw_est == 0) {
		w->bw_ns_est = w->bk / delta;
		w->bw_est = w->bw_ns_est;
	} else {
		w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
		w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
	}
}

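/*
 * Note on units: w->bk accumulates bytes acked in the current sampling
 * window and delta is measured in jiffies, so bw_ns_est and bw_est end
 * up in bytes per jiffy. tcp_westwood_bw_rttmin() below multiplies by
 * an RTT in jiffies and divides by mss_cache to turn that back into a
 * window expressed in segments.
 */
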
/*
 * @westwood_pkts_acked
 * Called after processing a group of packets,
 * but all Westwood needs is the last RTT sample.
 */
static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
{
	struct westwood *w = inet_csk_ca(sk);

	if (rtt > 0)
		w->rtt = usecs_to_jiffies(rtt);
}

/*
 * @westwood_update_window
 * Update the RTT-evaluation window if enough time has passed;
 * in that case, call the filter to compute a new bandwidth estimate.
 */
static void westwood_update_window(struct sock *sk)
{
	struct westwood *w = inet_csk_ca(sk);
	s32 delta = tcp_time_stamp - w->rtt_win_sx;

	/* Initialize w->snd_una with the first acked sequence number in order
	 * to fix mismatch between tp->snd_una and w->snd_una for the first
	 * bandwidth sample
	 */
	if (w->first_ack) {
		w->snd_una = tcp_sk(sk)->snd_una;
		w->first_ack = 0;
	}

	/*
	 * See if an RTT-window has passed.
	 * Be careful: if the RTT is less than 50 ms we don't filter
	 * but keep "building the sample", since estimating over very
	 * small time intervals is better avoided.
	 * Obviously on a LAN we will reasonably always have
	 * right_bound = left_bound + WESTWOOD_RTT_MIN
	 */
	if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
		westwood_filter(w, delta);

		w->bk = 0;
		w->rtt_win_sx = tcp_time_stamp;
	}
}

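/*
 * Example: with HZ = 1000 and a measured rtt of 30 jiffies the filter
 * still waits max(30, HZ/20) = 50 jiffies before producing a bandwidth
 * sample, while with an rtt of 200 jiffies it waits the full RTT.
 */
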
static inline void update_rtt_min(struct westwood *w)
{
	if (w->reset_rtt_min) {
		w->rtt_min = w->rtt;
		w->reset_rtt_min = 0;
	} else
		w->rtt_min = min(w->rtt, w->rtt_min);
}

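/*
 * reset_rtt_min is raised on CA_EVENT_LOSS (see tcp_westwood_event()),
 * so after a loss the minimum RTT is re-seeded from the very next
 * sample rather than being carried over from before the loss.
 */
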
/*
 * @westwood_fast_bw
 * Called when we are in the fast path, in particular when header
 * prediction is successful. In that case the update is straightforward
 * and doesn't need any particular care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	westwood_update_window(sk);

	w->bk += tp->snd_una - w->snd_una;
	w->snd_una = tp->snd_una;
	update_rtt_min(w);
}

/*
 * @westwood_acked_count
 * Compute cumul_ack, the number of bytes to credit to bk, taking
 * delayed and partial acks into account.
 */
static inline u32 westwood_acked_count(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	w->cumul_ack = tp->snd_una - w->snd_una;

	/* If cumul_ack is 0 this is a dupack since it's not moving
	 * tp->snd_una.
	 */
	if (!w->cumul_ack) {
		w->accounted += tp->mss_cache;
		w->cumul_ack = tp->mss_cache;
	}

	if (w->cumul_ack > tp->mss_cache) {
		/* Partial or delayed ack */
		if (w->accounted >= w->cumul_ack) {
			w->accounted -= w->cumul_ack;
			w->cumul_ack = tp->mss_cache;
		} else {
			w->cumul_ack -= w->accounted;
			w->accounted = 0;
		}
	}

	w->snd_una = tp->snd_una;

	return w->cumul_ack;
}

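/*
 * Worked example with a hypothetical mss_cache of 1000 bytes: three
 * dupacks each add one mss to bk and to "accounted" (3000 bytes total);
 * when a cumulative ACK then advances snd_una by 5000 bytes, the 3000
 * already-accounted bytes are subtracted and only 2000 are credited to
 * bk for that ack.
 */
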
/*
 * TCP Westwood
 * Here the limit is computed as bandwidth estimate * RTTmin; to express
 * it in packets we divide by mss_cache. The result is clamped to a
 * minimum of 2 segments, so this never returns 0.
 */
static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct westwood *w = inet_csk_ca(sk);

	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}

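/*
 * Purely illustrative numbers: bw_est = 50 bytes/jiffy, rtt_min = 120
 * jiffies and mss_cache = 1500 bytes give (50 * 120) / 1500 = 4
 * segments; results below 2 are clamped up to 2.
 */
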
static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct westwood *w = inet_csk_ca(sk);

	switch (event) {
	case CA_EVENT_FAST_ACK:
		westwood_fast_bw(sk);
		break;
	case CA_EVENT_COMPLETE_CWR:
		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		break;
	case CA_EVENT_LOSS:
		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
		/* Update RTT_min when next ack arrives */
		w->reset_rtt_min = 1;
		break;
	case CA_EVENT_SLOW_ACK:
		westwood_update_window(sk);
		w->bk += westwood_acked_count(sk);
		update_rtt_min(w);
		break;
	default:
		/* don't care */
		break;
	}
}

/* Extract info for TCP socket info provided via netlink. */
static void tcp_westwood_info(struct sock *sk, u32 ext,
			      struct sk_buff *skb)
{
	const struct westwood *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct tcpvegas_info info = {
			.tcpv_enabled = 1,
			.tcpv_rtt = jiffies_to_usecs(ca->rtt),
			.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min),
		};

		nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
	}
}

static struct tcp_congestion_ops tcp_westwood __read_mostly = {
	.init		= tcp_westwood_init,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.cwnd_event	= tcp_westwood_event,
	.get_info	= tcp_westwood_info,
	.pkts_acked	= tcp_westwood_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "westwood"
};

static int __init tcp_westwood_register(void)
{
	BUILD_BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);

MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");