// SPDX-License-Identifier: GPL-2.0-only
/*
 * TCP CUBIC: Binary Increase Congestion control for TCP v2.3
 * Home page:
 *      http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC
 * This is from the implementation of CUBIC TCP in
 * Sangtae Ha, Injong Rhee and Lisong Xu,
 *  "CUBIC: A New TCP-Friendly High-Speed TCP Variant"
 *  in ACM SIGOPS Operating System Review, July 2008.
 * Available from:
 *  http://netsrv.csc.ncsu.edu/export/cubic_a_new_tcp_2008.pdf
 *
 * CUBIC integrates a new slow start algorithm, called HyStart.
 * The details of HyStart are presented in
 *  Sangtae Ha and Injong Rhee,
 *  "Taming the Elephants: New TCP Slow Start", NCSU TechReport 2008.
 * Available from:
 *  http://netsrv.csc.ncsu.edu/export/hystart_techreport_2008.pdf
 *
 * All testing results are available from:
 * http://netsrv.csc.ncsu.edu/wiki/index.php/TCP_Testing
 *
 * Unless CUBIC is enabled and congestion window is large
 * this behaves the same as the original Reno.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <net/tcp.h>

#define BICTCP_BETA_SCALE    1024	/* Scale factor beta calculation
					 * max_cwnd = snd_cwnd * beta
					 */
#define	BICTCP_HZ		10	/* BIC HZ 2^10 = 1024 */

/* Two methods of hybrid slow start */
#define HYSTART_ACK_TRAIN	0x1
#define HYSTART_DELAY		0x2

/* Number of delay samples for detecting the increase of delay */
#define HYSTART_MIN_SAMPLES	8
#define HYSTART_DELAY_MIN	(4000U)	/* 4 ms */
#define HYSTART_DELAY_MAX	(16000U)	/* 16 ms */
#define HYSTART_DELAY_THRESH(x)	clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX)
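
/* Illustration (not in the original source): hystart_update() passes
 * ca->delay_min >> 3 through this clamp, so e.g. delay_min = 10 ms gives
 * clamp(1250us) = 4 ms, while delay_min = 100 ms gives clamp(12500us) = 12.5 ms.
 */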

static int fast_convergence __read_mostly = 1;
static int beta __read_mostly = 717;	/* = 717/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh __read_mostly;
static int bic_scale __read_mostly = 41;
static int tcp_friendliness __read_mostly = 1;

static int hystart __read_mostly = 1;
static int hystart_detect __read_mostly = HYSTART_ACK_TRAIN | HYSTART_DELAY;
static int hystart_low_window __read_mostly = 16;
static int hystart_ack_delta_us __read_mostly = 2000;

static u32 cube_rtt_scale __read_mostly;
static u32 beta_scale __read_mostly;
static u64 cube_factor __read_mostly;

/* Note parameters that are used for precomputing scale factors are read-only */
module_param(fast_convergence, int, 0644);
MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence");
module_param(beta, int, 0644);
MODULE_PARM_DESC(beta, "beta for multiplicative increase");
module_param(initial_ssthresh, int, 0644);
MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold");
module_param(bic_scale, int, 0444);
MODULE_PARM_DESC(bic_scale, "scale (scaled by 1024) value for bic function (bic_scale/1024)");
module_param(tcp_friendliness, int, 0644);
MODULE_PARM_DESC(tcp_friendliness, "turn on/off tcp friendliness");
module_param(hystart, int, 0644);
MODULE_PARM_DESC(hystart, "turn on/off hybrid slow start algorithm");
module_param(hystart_detect, int, 0644);
MODULE_PARM_DESC(hystart_detect, "hybrid slow start detection mechanisms"
		 " 1: packet-train 2: delay 3: both packet-train and delay");
module_param(hystart_low_window, int, 0644);
MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start");
module_param(hystart_ack_delta_us, int, 0644);
MODULE_PARM_DESC(hystart_ack_delta_us, "spacing between ack's indicating train (usecs)");

/* BIC TCP Parameters */
struct bictcp {
	u32	cnt;		/* increase cwnd by 1 after ACKs */
	u32	last_max_cwnd;	/* last maximum snd_cwnd */
	u32	last_cwnd;	/* the last snd_cwnd */
	u32	last_time;	/* time when updated last_cwnd */
	u32	bic_origin_point;/* origin point of bic function */
	u32	bic_K;		/* time to origin point
				   from the beginning of the current epoch */
	u32	delay_min;	/* min delay (usec) */
	u32	epoch_start;	/* beginning of an epoch */
	u32	ack_cnt;	/* number of acks */
	u32	tcp_cwnd;	/* estimated tcp cwnd */
	u16	unused;
	u8	sample_cnt;	/* number of samples to decide curr_rtt */
	u8	found;		/* the exit point is found? */
	u32	round_start;	/* beginning of each round */
	u32	end_seq;	/* end_seq of the round */
	u32	last_ack;	/* last time when the ACK spacing is close */
	u32	curr_rtt;	/* the minimum rtt of current round */
};

static inline void bictcp_reset(struct bictcp *ca)
{
	ca->cnt = 0;
	ca->last_max_cwnd = 0;
	ca->last_cwnd = 0;
	ca->last_time = 0;
	ca->bic_origin_point = 0;
	ca->bic_K = 0;
	ca->delay_min = 0;
	ca->epoch_start = 0;
	ca->ack_cnt = 0;
	ca->tcp_cwnd = 0;
	ca->found = 0;
}

static inline u32 bictcp_clock_us(const struct sock *sk)
{
	return tcp_sk(sk)->tcp_mstamp;
}

static inline void bictcp_hystart_reset(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	ca->round_start = ca->last_ack = bictcp_clock_us(sk);
	ca->end_seq = tp->snd_nxt;
	ca->curr_rtt = ~0U;
	ca->sample_cnt = 0;
}

static void bictcp_init(struct sock *sk)
{
	struct bictcp *ca = inet_csk_ca(sk);

	bictcp_reset(ca);

	if (hystart)
		bictcp_hystart_reset(sk);

	if (!hystart && initial_ssthresh)
		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}

static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
	if (event == CA_EVENT_TX_START) {
		struct bictcp *ca = inet_csk_ca(sk);
		u32 now = tcp_jiffies32;
		s32 delta;

		delta = now - tcp_sk(sk)->lsndtime;

		/* We were application limited (idle) for a while.
		 * Shift epoch_start to keep cwnd growth to cubic curve:
		 * moving the epoch forward by the idle time leaves
		 * t = now - epoch_start unchanged, so cwnd resumes from the
		 * same point on the curve instead of jumping ahead as if it
		 * had kept growing while idle.
		 */
		if (ca->epoch_start && delta > 0) {
			ca->epoch_start += delta;
			if (after(ca->epoch_start, now))
				ca->epoch_start = now;
		}
		return;
	}
}

/* calculate the cubic root of x using a table lookup followed by one
 * Newton-Raphson iteration.
 * Avg err ~= 0.195%
 */
static u32 cubic_root(u64 a)
{
	u32 x, b, shift;
	/*
	 * cbrt(x) MSB values for x MSB values in [0..63].
	 * Precomputed then refined by hand - Willy Tarreau
	 *
	 * For x in [0..63],
	 *   v = cbrt(x << 18) - 1
	 *   cbrt(x) = (v[x] + 10) >> 6
	 */
	static const u8 v[] = {
		/* 0x00 */    0,   54,   54,   54,  118,  118,  118,  118,
		/* 0x08 */  123,  129,  134,  138,  143,  147,  151,  156,
		/* 0x10 */  157,  161,  164,  168,  170,  173,  176,  179,
		/* 0x18 */  181,  185,  187,  190,  192,  194,  197,  199,
		/* 0x20 */  200,  202,  204,  206,  209,  211,  213,  215,
		/* 0x28 */  217,  219,  221,  222,  224,  225,  227,  229,
		/* 0x30 */  231,  232,  234,  236,  237,  239,  240,  242,
		/* 0x38 */  244,  245,  246,  248,  250,  251,  252,  254,
	};

	b = fls64(a);
	if (b < 7) {
		/* a in [0..63] */
		return ((u32)v[(u32)a] + 35) >> 6;
	}

	b = ((b * 84) >> 8) - 1;
	shift = (a >> (b * 3));

	x = ((u32)(((u32)v[shift] + 10) << b)) >> 6;

	/*
	 * Newton-Raphson iteration
	 *                         2
	 * x    = ( 2 * x  +  a / x  ) / 3
	 *  k+1          k         k
	 */
	x = (2 * x + (u32)div64_u64(a, (u64)x * (u64)(x - 1)));
	x = ((x * 341) >> 10);
	return x;
}
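
/* Worked examples (illustrative, traced by hand): cubic_root(8) takes the
 * small-input path: fls64(8) = 4 < 7, so it returns (v[8] + 35) >> 6 =
 * (123 + 35) >> 6 = 2, i.e. cbrt(8). cubic_root(1000) takes the long path:
 * b = ((10 * 84) >> 8) - 1 = 2, shift = 1000 >> 6 = 15, the table gives the
 * estimate x = ((156 + 10) << 2) >> 6 = 10, and the Newton-Raphson step
 * leaves x = ((2 * 10 + 1000 / 90) * 341) >> 10 = 10, i.e. cbrt(1000).
 */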

/*
 * Compute congestion window to use.
 */
static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
{
	u32 delta, bic_target, max_cnt;
	u64 offs, t;

	ca->ack_cnt += acked;	/* count the number of ACKed packets */

	if (ca->last_cwnd == cwnd &&
	    (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
		return;

	/* The CUBIC function can update ca->cnt at most once per jiffy.
	 * On all cwnd reduction events, ca->epoch_start is set to 0,
	 * which will force a recalculation of ca->cnt.
	 */
	if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
		goto tcp_friendliness;

	ca->last_cwnd = cwnd;
	ca->last_time = tcp_jiffies32;

	if (ca->epoch_start == 0) {
		ca->epoch_start = tcp_jiffies32;	/* record beginning */
		ca->ack_cnt = acked;			/* start counting */
		ca->tcp_cwnd = cwnd;			/* syn with cubic */

		if (ca->last_max_cwnd <= cwnd) {
			ca->bic_K = 0;
			ca->bic_origin_point = cwnd;
		} else {
			/* Compute new K based on
			 * (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ)
			 */
			ca->bic_K = cubic_root(cube_factor
					       * (ca->last_max_cwnd - cwnd));
			ca->bic_origin_point = ca->last_max_cwnd;
		}
	}

	/* cubic function - calc*/
	/* calculate c * time^3 / rtt,
	 *  while considering overflow in calculation of time^3
	 * (so time^3 is done by using 64 bit)
	 * and without the support of division of 64 bit numbers
	 * (so all divisions are done by using 32 bit)
	 *  also NOTE the unit of those variables
	 *	  time  = (t - K) / 2^bictcp_HZ
	 *	  c = bic_scale >> 10
	 *	  rtt  = (srtt >> 3) / HZ
	 *
	 * !!! The following code does not have overflow problems,
	 * if the cwnd < 1 million packets !!!
	 */
	t = (s32)(tcp_jiffies32 - ca->epoch_start);
	t += usecs_to_jiffies(ca->delay_min);
	/* change the unit from HZ to bictcp_HZ */
	t <<= BICTCP_HZ;
	do_div(t, HZ);

	if (t < ca->bic_K)		/* t - K */
		offs = ca->bic_K - t;
	else
		offs = t - ca->bic_K;

	/* c/rtt * (t-K)^3 */
	delta = (cube_rtt_scale * offs * offs * offs) >> (10 + 3 * BICTCP_HZ);
	if (t < ca->bic_K)		/* below origin*/
		bic_target = ca->bic_origin_point - delta;
	else				/* above origin*/
		bic_target = ca->bic_origin_point + delta;

	/* cubic function - calc bictcp_cnt*/
	if (bic_target > cwnd) {
		ca->cnt = cwnd / (bic_target - cwnd);
	} else {
		ca->cnt = 100 * cwnd;	/* very small increment*/
	}
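
	/* Illustration (adapted from the fix description for the
	 * once-per-jiffy rule above): with cwnd = 40 and bic_target = 44,
	 * cnt = 40 / 4 = 10, so tcp_cong_avoid_ai() grows cwnd by one
	 * segment per 10 ACKed packets. Without that rule, recomputing cnt
	 * mid-jiffy once cwnd reached 41 would give 41 / 3 = 13 and slow
	 * growth artificially.
	 */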

	/*
	 * The initial growth of cubic function may be too conservative
	 * when the available bandwidth is still unknown.
	 */
	if (ca->last_max_cwnd == 0 && ca->cnt > 20)
		ca->cnt = 20;	/* increase cwnd 5% per RTT */

tcp_friendliness:
	/* TCP Friendly */
	if (tcp_friendliness) {
		u32 scale = beta_scale;

		delta = (cwnd * scale) >> 3;
		while (ca->ack_cnt > delta) {		/* update tcp cwnd */
			ca->ack_cnt -= delta;
			ca->tcp_cwnd++;
		}

		if (ca->tcp_cwnd > cwnd) {	/* if bic is slower than tcp */
			delta = ca->tcp_cwnd - cwnd;
			max_cnt = cwnd / delta;
			if (ca->cnt > max_cnt)
				ca->cnt = max_cnt;
		}
	}

	/* The maximum rate of cwnd increase CUBIC allows is 1 packet per
	 * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
	 */
	ca->cnt = max(ca->cnt, 2U);
}

static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp)) {
		if (hystart && after(ack, ca->end_seq))
			bictcp_hystart_reset(sk);
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	bictcp_update(ca, tp->snd_cwnd, acked);
	tcp_cong_avoid_ai(tp, ca->cnt, acked);
}

static u32 bictcp_recalc_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	ca->epoch_start = 0;	/* end of epoch */

	/* Wmax and fast convergence */
	if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
		ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
			/ (2 * BICTCP_BETA_SCALE);
	else
		ca->last_max_cwnd = tp->snd_cwnd;

	return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}
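
/* Numeric illustration with the default beta = 717: the new ssthresh is
 * cwnd * 717 / 1024, i.e. roughly 0.7 * cwnd, and under fast convergence the
 * remembered Wmax is cwnd * (1024 + 717) / 2048, i.e. roughly 0.85 * cwnd,
 * releasing some bandwidth for newer flows.
 */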

static void bictcp_state(struct sock *sk, u8 new_state)
{
	if (new_state == TCP_CA_Loss) {
		bictcp_reset(inet_csk_ca(sk));
		bictcp_hystart_reset(sk);
	}
}

/* Account for TSO/GRO delays.
 * Otherwise short RTT flows could get too small ssthresh, since during
 * slow start we begin with small TSO packets and ca->delay_min would
 * not account for long aggregation delay when TSO packets get bigger.
 * Ideally even with a very small RTT we would like to have at least one
 * TSO packet being sent and received by GRO, and another one in qdisc layer.
 * We apply another 100% factor because @rate is doubled at this point.
 * We cap the cushion to 1ms.
 */
static u32 hystart_ack_delay(struct sock *sk)
{
	unsigned long rate;

	rate = READ_ONCE(sk->sk_pacing_rate);
	if (!rate)
		return 0;
	return min_t(u64, USEC_PER_MSEC,
		     div64_ul((u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
}
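
/* Rough numbers (illustrative, assuming GSO_MAX_SIZE = 64KB): at a pacing
 * rate of 10 Gbit/s (1.25e9 bytes/s) the cushion is 262144 bytes worth of
 * transmit time, about 209 usec; at 1 Gbit/s the raw value (~2097 usec)
 * is capped to 1 ms.
 */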

static void hystart_update(struct sock *sk, u32 delay)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);
	u32 threshold;

	if (hystart_detect & HYSTART_ACK_TRAIN) {
		u32 now = bictcp_clock_us(sk);

		/* first detection parameter - ack-train detection */
		if ((s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
			ca->last_ack = now;

			threshold = ca->delay_min + hystart_ack_delay(sk);

			/* Hystart ack train triggers if we get ack past
			 * ca->delay_min/2.
			 * Pacing might have delayed packets up to RTT/2
			 * during slow start.
			 */
			if (sk->sk_pacing_status == SK_PACING_NONE)
				threshold >>= 1;

			if ((s32)(now - ca->round_start) > threshold) {
				ca->found = 1;
				pr_debug("hystart_ack_train (%u > %u) delay_min %u (+ ack_delay %u) cwnd %u\n",
					 now - ca->round_start, threshold,
					 ca->delay_min, hystart_ack_delay(sk), tp->snd_cwnd);
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPHYSTARTTRAINDETECT);
				NET_ADD_STATS(sock_net(sk),
					      LINUX_MIB_TCPHYSTARTTRAINCWND,
					      tp->snd_cwnd);
				tp->snd_ssthresh = tp->snd_cwnd;
			}
		}
	}

	if (hystart_detect & HYSTART_DELAY) {
		/* obtain the minimum delay of more than sampling packets */
		if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
			if (ca->curr_rtt > delay)
				ca->curr_rtt = delay;

			ca->sample_cnt++;
		} else {
			if (ca->curr_rtt > ca->delay_min +
			    HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
				ca->found = 1;
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPHYSTARTDELAYDETECT);
				NET_ADD_STATS(sock_net(sk),
					      LINUX_MIB_TCPHYSTARTDELAYCWND,
					      tp->snd_cwnd);
				tp->snd_ssthresh = tp->snd_cwnd;
			}
		}
	}
}
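
/* Example of the delay-based exit (illustrative): with delay_min = 16 ms,
 * delay_min >> 3 = 2 ms clamps up to HYSTART_DELAY_MIN = 4 ms, so slow start
 * exits once the minimum RTT observed over HYSTART_MIN_SAMPLES ACKs of a
 * round exceeds 20 ms.
 */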

static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);
	u32 delay;

	/* Some calls are for duplicates without timestamps */
	if (sample->rtt_us < 0)
		return;

	/* Discard delay samples right after fast recovery */
	if (ca->epoch_start && (s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
		return;

	delay = sample->rtt_us;
	if (delay == 0)
		delay = 1;

	/* first time call or link delay decreases */
	if (ca->delay_min == 0 || ca->delay_min > delay)
		ca->delay_min = delay;

	/* hystart triggers when cwnd is larger than some threshold */
	if (!ca->found && tcp_in_slow_start(tp) && hystart &&
	    tp->snd_cwnd >= hystart_low_window)
		hystart_update(sk, delay);
}

static struct tcp_congestion_ops cubictcp __read_mostly = {
	.init		= bictcp_init,
	.ssthresh	= bictcp_recalc_ssthresh,
	.cong_avoid	= bictcp_cong_avoid,
	.set_state	= bictcp_state,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cwnd_event	= bictcp_cwnd_event,
	.pkts_acked	= bictcp_acked,
	.owner		= THIS_MODULE,
	.name		= "cubic",
};

static int __init cubictcp_register(void)
{
	BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);

	/* Precompute a bunch of the scaling factors that are used per-packet
	 * based on SRTT of 100ms
	 */

	beta_scale = 8 * (BICTCP_BETA_SCALE + beta) / 3
		/ (BICTCP_BETA_SCALE - beta);

	cube_rtt_scale = (bic_scale * 10);	/* 1024*c/rtt */

	/* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
	 *  so K = cubic_root( (wmax-cwnd)*rtt/c )
	 * the unit of K is bictcp_HZ=2^10, not HZ
	 *
	 *  c = bic_scale >> 10
	 *  rtt = 100ms
	 *
	 * the following code has been designed and tested for
	 * cwnd < 1 million packets
	 * RTT < 100 seconds
	 * HZ < 100,000,000 (corresponding to a 10 nano-second tick)
	 */

	/* 1/c * 2^2*bictcp_HZ * srtt */
	cube_factor = 1ull << (10 + 3 * BICTCP_HZ);	/* 2^40 */

	/* divide by bic_scale and by constant Srtt (100ms) */
	do_div(cube_factor, bic_scale * 10);
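
	/* With the defaults (beta = 717, bic_scale = 41), these work out to
	 * beta_scale = 15, cube_rtt_scale = 410 and
	 * cube_factor = 2^40 / 410 ~= 2.68e9 (integer arithmetic throughout).
	 */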

	return tcp_register_congestion_control(&cubictcp);
}

static void __exit cubictcp_unregister(void)
{
	tcp_unregister_congestion_control(&cubictcp);
}

module_init(cubictcp_register);
module_exit(cubictcp_unregister);

MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CUBIC TCP");
MODULE_VERSION("2.3");