/*
 * YeAH TCP
 *
 * For further details look at:
 *   https://web.archive.org/web/20080316215752/http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
 *
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>

#include <net/tcp.h>

#include "tcp_vegas.h"

/* YeAH tuning knobs (names follow the YeAH-TCP paper). */
#define TCP_YEAH_ALPHA       80 /* number of packets queued at the bottleneck */
#define TCP_YEAH_GAMMA        1 /* fraction of queue to be removed per rtt */
#define TCP_YEAH_DELTA        3 /* log minimum fraction of cwnd to be removed on loss */
#define TCP_YEAH_EPSILON      1 /* log maximum fraction to be removed on early decongestion */
#define TCP_YEAH_PHY          8 /* maximum delta from base */
#define TCP_YEAH_RHO         16 /* minimum number of consecutive rtt to consider competition on loss */
#define TCP_YEAH_ZETA        50 /* minimum number of state switches to reset reno_count */

/* Additive-increase cap used by the Scalable-style growth below. */
#define TCP_SCALABLE_AI_CNT 100U
/* YeAH variables */
struct yeah {
	struct vegas vegas;	/* must be first: this module reuses the Vegas
				 * helpers, which cast inet_csk_ca(sk) to
				 * struct vegas */

	/* YeAH */
	u32 lastQ;		/* most recent estimate of packets queued at
				 * the bottleneck (see cong_avoid) */
	u32 doing_reno_now;	/* consecutive decongestion rounds spent in
				 * Reno mode; 0 means Scalable mode */
	u32 reno_count;		/* floor for cwnd reductions; estimate of a
				 * fair Reno share */
	u32 fast_count;		/* consecutive "fast" (uncongested) rounds
				 * before reno_count is reset */
	u32 pkts_acked;		/* packets acked by the last ACK while the
				 * connection was in TCP_CA_Open */
};
static void tcp_yeah_init ( struct sock * sk )
{
struct tcp_sock * tp = tcp_sk ( sk ) ;
struct yeah * yeah = inet_csk_ca ( sk ) ;
tcp_vegas_init ( sk ) ;
yeah - > doing_reno_now = 0 ;
yeah - > lastQ = 0 ;
yeah - > reno_count = 2 ;
/* Ensure the MD arithmetic works. This is somewhat pedantic,
* since I don ' t think we will see a cwnd this large . : ) */
tp - > snd_cwnd_clamp = min_t ( u32 , tp - > snd_cwnd_clamp , 0xffffffff / 128 ) ;
}
2016-05-11 20:02:13 +03:00
static void tcp_yeah_pkts_acked ( struct sock * sk ,
const struct ack_sample * sample )
2007-02-22 11:23:05 +03:00
{
const struct inet_connection_sock * icsk = inet_csk ( sk ) ;
struct yeah * yeah = inet_csk_ca ( sk ) ;
if ( icsk - > icsk_ca_state = = TCP_CA_Open )
2016-05-11 20:02:13 +03:00
yeah - > pkts_acked = sample - > pkts_acked ;
2007-04-24 09:26:16 +04:00
2016-05-11 20:02:13 +03:00
tcp_vegas_pkts_acked ( sk , sample ) ;
2007-02-22 11:23:05 +03:00
}
2014-05-03 08:18:05 +04:00
/* Main congestion-avoidance routine: grow cwnd in Scalable or Reno style
 * depending on the current mode, and once per RTT run a Vegas-style queue
 * estimate to switch between the two and apply precautionary decongestion.
 */
static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp))
		tcp_slow_start(tp, acked);
	else if (!yeah->doing_reno_now) {
		/* Scalable: count every acked packet and bump cwnd once the
		 * counter exceeds min(cwnd, TCP_SCALABLE_AI_CNT).
		 */
		tp->snd_cwnd_cnt += yeah->pkts_acked;
		if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		}

		yeah->pkts_acked = 1;

	} else {
		/* Reno */
		tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
	}

	/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
	 *
	 * These are so named because they represent the approximate values
	 * of snd_una and snd_nxt at the beginning of the current RTT. More
	 * precisely, they represent the amount of data sent during the RTT.
	 * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
	 * we will calculate that (v_beg_snd_nxt - v_vegas.beg_snd_una)
	 * outstanding bytes of data have been ACKed during the course of
	 * the RTT, giving an "actual" rate of:
	 *
	 *     (v_beg_snd_nxt - v_vegas.beg_snd_una) / (rtt duration)
	 *
	 * Unfortunately, v_vegas.beg_snd_una is not exactly equal to snd_una,
	 * because delayed ACKs can cover more than one segment, so they
	 * don't line up yeahly with the boundaries of RTTs.
	 *
	 * Another unfortunate fact of life is that delayed ACKs delay the
	 * advance of the left edge of our send window, so that the number
	 * of bytes we send in an RTT is often less than our cwnd will allow.
	 * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
	 */

	if (after(ack, yeah->vegas.beg_snd_nxt)) {
		/* We do the Vegas calculations only if we got enough RTT
		 * samples that we can be reasonably sure that we got
		 * at least one RTT sample that wasn't from a delayed ACK.
		 * If we only had 2 samples total,
		 * then that means we're getting only 1 ACK per RTT, which
		 * means they're almost certainly delayed ACKs.
		 * If we have 3 samples, we should be OK.
		 */
		if (yeah->vegas.cntRTT > 2) {
			u32 rtt, queue;
			u64 bw;

			/* We have enough RTT samples, so, using the Vegas
			 * algorithm, we determine if we should increase or
			 * decrease cwnd, and by how much.
			 */

			/* Pluck out the RTT we are using for the Vegas
			 * calculations. This is the min RTT seen during the
			 * last RTT. Taking the min filters out the effects
			 * of delayed ACKs, at the cost of noticing congestion
			 * a bit later.
			 */
			rtt = yeah->vegas.minRTT;

			/* Compute excess number of packets above bandwidth.
			 * Avoid doing full 64 bit divide.
			 */
			bw = tp->snd_cwnd;
			bw *= rtt - yeah->vegas.baseRTT;
			do_div(bw, rtt);
			queue = bw;

			if (queue > TCP_YEAH_ALPHA ||
			    rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
				/* Congested: decongest early unless the queue
				 * looks like competing Reno traffic.
				 */
				if (queue > TCP_YEAH_ALPHA &&
				    tp->snd_cwnd > yeah->reno_count) {
					u32 reduction = min(queue / TCP_YEAH_GAMMA,
							    tp->snd_cwnd >> TCP_YEAH_EPSILON);

					tp->snd_cwnd -= reduction;

					tp->snd_cwnd = max(tp->snd_cwnd,
							   yeah->reno_count);

					tp->snd_ssthresh = tp->snd_cwnd;
				}

				if (yeah->reno_count <= 2)
					yeah->reno_count = max(tp->snd_cwnd >> 1, 2U);
				else
					yeah->reno_count++;

				/* Saturating count of consecutive congested
				 * rounds; non-zero selects Reno mode above.
				 */
				yeah->doing_reno_now = min(yeah->doing_reno_now + 1,
							   0xffffffU);
			} else {
				yeah->fast_count++;

				if (yeah->fast_count > TCP_YEAH_ZETA) {
					yeah->reno_count = 2;
					yeah->fast_count = 0;
				}

				yeah->doing_reno_now = 0;
			}

			yeah->lastQ = queue;
		}

		/* Save the extent of the current window so we can use this
		 * at the end of the next RTT.
		 */
		yeah->vegas.beg_snd_una  = yeah->vegas.beg_snd_nxt;
		yeah->vegas.beg_snd_nxt  = tp->snd_nxt;
		yeah->vegas.beg_snd_cwnd = tp->snd_cwnd;

		/* Wipe the slate clean for the next RTT. */
		yeah->vegas.cntRTT = 0;
		yeah->vegas.minRTT = 0x7fffffff;
	}
}
static u32 tcp_yeah_ssthresh ( struct sock * sk )
{
2007-02-22 11:23:05 +03:00
const struct tcp_sock * tp = tcp_sk ( sk ) ;
struct yeah * yeah = inet_csk_ca ( sk ) ;
u32 reduction ;
if ( yeah - > doing_reno_now < TCP_YEAH_RHO ) {
reduction = yeah - > lastQ ;
2013-12-23 10:37:27 +04:00
reduction = min ( reduction , max ( tp - > snd_cwnd > > 1 , 2U ) ) ;
2007-02-22 11:23:05 +03:00
2013-12-23 10:37:27 +04:00
reduction = max ( reduction , tp - > snd_cwnd > > TCP_YEAH_DELTA ) ;
2007-02-22 11:23:05 +03:00
} else
2008-11-03 11:24:34 +03:00
reduction = max ( tp - > snd_cwnd > > 1 , 2U ) ;
2007-02-22 11:23:05 +03:00
yeah - > fast_count = 0 ;
yeah - > reno_count = max ( yeah - > reno_count > > 1 , 2U ) ;
2016-01-11 21:42:43 +03:00
return max_t ( int , tp - > snd_cwnd - reduction , 2 ) ;
2007-02-22 11:23:05 +03:00
}
2011-03-10 11:40:17 +03:00
/* Congestion-control operations: YeAH supplies its own init/ssthresh/
 * cong_avoid/pkts_acked and borrows the remaining hooks from Vegas/Reno.
 */
static struct tcp_congestion_ops tcp_yeah __read_mostly = {
	.init		= tcp_yeah_init,
	.ssthresh	= tcp_yeah_ssthresh,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cong_avoid	= tcp_yeah_cong_avoid,
	.set_state	= tcp_vegas_state,
	.cwnd_event	= tcp_vegas_cwnd_event,
	.get_info	= tcp_vegas_get_info,
	.pkts_acked	= tcp_yeah_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "yeah",
};
/* Module entry point: register the YeAH congestion-control algorithm.
 *
 * Returns 0 on success or a negative errno from
 * tcp_register_congestion_control() on failure.
 */
static int __init tcp_yeah_register(void)
{
	/* Private CA state must fit in the space the ICSK reserves for it;
	 * this is decidable at compile time, so fail the build rather than
	 * crashing at runtime with BUG_ON.
	 */
	BUILD_BUG_ON(sizeof(struct yeah) > ICSK_CA_PRIV_SIZE);

	/* Propagate registration failure (e.g. duplicate name) instead of
	 * unconditionally returning 0, so a failed module load is reported
	 * as such.
	 */
	return tcp_register_congestion_control(&tcp_yeah);
}
/* Module exit point: remove YeAH from the list of available algorithms. */
static void __exit tcp_yeah_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_yeah);
}

module_init(tcp_yeah_register);
module_exit(tcp_yeah_unregister);

MODULE_AUTHOR("Angelo P. Castellani");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("YeAH TCP");