// SPDX-License-Identifier: GPL-2.0-only
/*
 * H-TCP congestion control. The algorithm is detailed in:
 * R.N.Shorten, D.J.Leith:
 *   "H-TCP: TCP for high-speed and long-distance networks"
 *   Proc. PFLDnet, Argonne, 2004.
 * https://www.hamilton.ie/net/htcp3.pdf
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <net/tcp.h>

#define ALPHA_BASE	(1<<7)	/* 1.0 with shift << 7 */
#define BETA_MIN	(1<<6)	/* 0.5 with shift << 7 */
#define BETA_MAX	102	/* 0.8 with shift << 7 */

/* Scale the additive increase by RTT so flows with different round-trip
 * times grow their windows at comparable rates (see htcp_alpha_update).
 */
static int use_rtt_scaling __read_mostly = 1;
module_param(use_rtt_scaling, int, 0644);
MODULE_PARM_DESC(use_rtt_scaling, "turn on/off RTT scaling");

/* Reset beta to BETA_MIN when achieved bandwidth shifts between epochs,
 * instead of relying only on the adaptive minRTT/maxRTT ratio
 * (see htcp_beta_update).
 */
static int use_bandwidth_switch __read_mostly = 1;
module_param(use_bandwidth_switch, int, 0644);
MODULE_PARM_DESC(use_bandwidth_switch, "turn on/off bandwidth switcher");
/* Per-connection H-TCP state, stored in the inet_csk congestion-control
 * private area (size-checked against ICSK_CA_PRIV_SIZE in htcp_register).
 */
struct htcp {
	u32	alpha;		/* Fixed point arith, << 7 */
	u8	beta;		/* Fixed point arith, << 7 */
	u8	modeswitch;	/* Delay modeswitch
				   until we had at least one congestion event */
	u16	pkts_acked;	/* pkts acked by the latest ACK, while in TCP_CA_Open */
	u32	packetcount;	/* pkts acked since the last throughput sample */
	u32	minRTT;		/* smallest RTT observed so far, in jiffies */
	u32	maxRTT;		/* largest RTT observed while in TCP_CA_Open, in jiffies */
	u32	last_cong;	/* Time since last congestion event end */
	u32	undo_last_cong;	/* snapshot taken by htcp_reset(); nonzero means
				 * an undo (htcp_cwnd_undo) is still possible */
	u32	undo_maxRTT;
	u32	undo_old_maxB;

	/* Bandwidth estimation */
	u32	minB;
	u32	maxB;
	u32	old_maxB;	/* maxB of the previous sampling epoch */
	u32	Bi;		/* smoothed achieved throughput, packets/sec */
	u32	lasttime;	/* jiffies stamp of the last throughput sample */
};
/* Jiffies elapsed since the end of the last congestion event. */
static inline u32 htcp_cong_time(const struct htcp *ca)
{
	return jiffies - ca->last_cong;
}
/* Length of the current congestion epoch measured in round-trip times.
 * Callers must guarantee ca->minRTT != 0 (the only call site in this
 * file, measure_achieved_throughput, checks ca->minRTT > 0 first).
 */
static inline u32 htcp_ccount(const struct htcp *ca)
{
	return htcp_cong_time(ca) / ca->minRTT;
}
2005-06-23 23:28:11 +04:00
static inline void htcp_reset ( struct htcp * ca )
{
2006-03-21 09:23:10 +03:00
ca - > undo_last_cong = ca - > last_cong ;
2005-06-23 23:28:11 +04:00
ca - > undo_maxRTT = ca - > maxRTT ;
ca - > undo_old_maxB = ca - > old_maxB ;
2006-03-21 09:23:10 +03:00
ca - > last_cong = jiffies ;
2005-06-23 23:28:11 +04:00
}
2005-08-10 11:03:31 +04:00
static u32 htcp_cwnd_undo ( struct sock * sk )
2005-06-23 23:28:11 +04:00
{
2005-08-10 11:03:31 +04:00
struct htcp * ca = inet_csk_ca ( sk ) ;
2007-02-13 00:34:03 +03:00
2008-11-12 12:41:09 +03:00
if ( ca - > undo_last_cong ) {
ca - > last_cong = ca - > undo_last_cong ;
ca - > maxRTT = ca - > undo_maxRTT ;
ca - > old_maxB = ca - > undo_old_maxB ;
ca - > undo_last_cong = 0 ;
}
2007-02-13 00:34:03 +03:00
2017-08-04 06:38:51 +03:00
return tcp_reno_undo_cwnd ( sk ) ;
2005-06-23 23:28:11 +04:00
}
/* Fold one smoothed-RTT sample (in jiffies) into minRTT/maxRTT. */
static inline void measure_rtt(struct sock *sk, u32 srtt)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct htcp *ca = inet_csk_ca(sk);

	/* keep track of minimum RTT seen so far, minRTT is zero at first */
	if (ca->minRTT > srtt || !ca->minRTT)
		ca->minRTT = srtt;

	/* max RTT: only tracked outside of loss/recovery, where queueing
	 * delay reflects steady state rather than retransmission effects.
	 */
	if (icsk->icsk_ca_state == TCP_CA_Open) {
		if (ca->maxRTT < ca->minRTT)
			ca->maxRTT = ca->minRTT;
		/* Creep maxRTT upward by at most 20ms per sample so a
		 * single outlier cannot inflate it.
		 */
		if (ca->maxRTT < srtt &&
		    srtt <= ca->maxRTT + msecs_to_jiffies(20))
			ca->maxRTT = srtt;
	}
}
/* .pkts_acked hook: per-ACK sampling of the RTT and, when the bandwidth
 * switcher is enabled, of the achieved throughput estimate ca->Bi
 * (packets/sec), later consumed by htcp_beta_update().
 */
static void measure_achieved_throughput(struct sock *sk,
					const struct ack_sample *sample)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct htcp *ca = inet_csk_ca(sk);
	u32 now = tcp_jiffies32;

	if (icsk->icsk_ca_state == TCP_CA_Open)
		ca->pkts_acked = sample->pkts_acked;

	if (sample->rtt_us > 0)
		measure_rtt(sk, usecs_to_jiffies(sample->rtt_us));

	if (!use_bandwidth_switch)
		return;

	/* achieved throughput calculations */
	if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_Disorder))) {
		/* in loss/recovery the sample would be meaningless; restart it */
		ca->packetcount = 0;
		ca->lasttime = now;
		return;
	}

	ca->packetcount += sample->pkts_acked;

	/* Take a sample once at least a cwnd's worth of packets has been
	 * acked and at least one minRTT has elapsed.
	 */
	if (ca->packetcount >= tcp_snd_cwnd(tp) - (ca->alpha >> 7 ? : 1) &&
	    now - ca->lasttime >= ca->minRTT &&
	    ca->minRTT > 0) {
		__u32 cur_Bi = ca->packetcount * HZ / (now - ca->lasttime);

		if (htcp_ccount(ca) <= 3) {
			/* just after backoff */
			ca->minB = ca->maxB = ca->Bi = cur_Bi;
		} else {
			/* EWMA: 3/4 old estimate + 1/4 new sample */
			ca->Bi = (3 * ca->Bi + cur_Bi) / 4;
			if (ca->Bi > ca->maxB)
				ca->maxB = ca->Bi;
			if (ca->minB > ca->maxB)
				ca->minB = ca->maxB;
		}
		ca->packetcount = 0;
		ca->lasttime = now;
	}
}
/* Recompute the backoff factor beta (fixed point, << 7).
 * With the bandwidth switcher enabled, a throughput change of more than
 * ~20% between epochs resets beta to BETA_MIN (0.5); otherwise beta
 * adapts toward minRTT/maxRTT, clamped to [BETA_MIN, BETA_MAX].
 */
static inline void htcp_beta_update(struct htcp *ca, u32 minRTT, u32 maxRTT)
{
	if (use_bandwidth_switch) {
		u32 maxB = ca->maxB;
		u32 old_maxB = ca->old_maxB;

		ca->old_maxB = ca->maxB;
		/* 5*maxB outside [4*old, 6*old] <=> >20% bandwidth change */
		if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
			ca->beta = BETA_MIN;
			ca->modeswitch = 0;
			return;
		}
	}

	/* Trust the adaptive ratio only after one congestion event
	 * (modeswitch set) and when minRTT exceeds 10ms.
	 */
	if (ca->modeswitch && minRTT > msecs_to_jiffies(10) && maxRTT) {
		ca->beta = (minRTT << 7) / maxRTT;
		if (ca->beta < BETA_MIN)
			ca->beta = BETA_MIN;
		else if (ca->beta > BETA_MAX)
			ca->beta = BETA_MAX;
	} else {
		ca->beta = BETA_MIN;
		ca->modeswitch = 1;
	}
}
/* Recompute the additive-increase factor alpha (fixed point, << 7):
 *   alpha = 2 * factor * (1 - beta)
 * where factor grows quadratically with the time elapsed in the current
 * congestion epoch beyond the first second, per the H-TCP increase
 * function.
 */
static inline void htcp_alpha_update(struct htcp *ca)
{
	u32 minRTT = ca->minRTT;
	u32 factor = 1;
	u32 diff = htcp_cong_time(ca);

	if (diff > HZ) {
		diff -= HZ;
		/* factor = 1 + 10*Delta + (Delta/2)^2, Delta in seconds */
		factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / HZ)) / HZ;
	}

	if (use_rtt_scaling && minRTT) {
		/* Scale growth relative to a 100ms reference RTT. */
		u32 scale = (HZ << 3) / (10 * minRTT);

		/* clamping ratio to interval [0.5,10]<<3 */
		scale = min(max(scale, 1U << 2), 10U << 3);
		factor = (factor << 3) / scale;
		if (!factor)
			factor = 1;
	}

	ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
	if (!ca->alpha)
		ca->alpha = ALPHA_BASE;
}
2007-02-13 00:34:03 +03:00
/*
* After we have the rtt data to calculate beta , we ' d still prefer to wait one
2005-06-23 23:28:11 +04:00
* rtt before we adjust our beta to ensure we are working from a consistent
* data .
*
* This function should be called when we hit a congestion event since only at
* that point do we really have a real sense of maxRTT ( the queues en route
* were getting just too full now ) .
*/
2005-08-10 11:03:31 +04:00
static void htcp_param_update ( struct sock * sk )
2005-06-23 23:28:11 +04:00
{
2005-08-10 11:03:31 +04:00
struct htcp * ca = inet_csk_ca ( sk ) ;
2005-06-23 23:28:11 +04:00
u32 minRTT = ca - > minRTT ;
u32 maxRTT = ca - > maxRTT ;
htcp_beta_update ( ca , minRTT , maxRTT ) ;
htcp_alpha_update ( ca ) ;
2007-02-13 00:34:03 +03:00
/* add slowly fading memory for maxRTT to accommodate routing changes */
2005-06-23 23:28:11 +04:00
if ( minRTT > 0 & & maxRTT > minRTT )
2007-02-13 00:34:03 +03:00
ca - > maxRTT = minRTT + ( ( maxRTT - minRTT ) * 95 ) / 100 ;
2005-06-23 23:28:11 +04:00
}
2005-08-10 11:03:31 +04:00
static u32 htcp_recalc_ssthresh ( struct sock * sk )
2005-06-23 23:28:11 +04:00
{
2005-08-10 11:03:31 +04:00
const struct tcp_sock * tp = tcp_sk ( sk ) ;
const struct htcp * ca = inet_csk_ca ( sk ) ;
2007-02-13 00:34:03 +03:00
2005-08-10 11:03:31 +04:00
htcp_param_update ( sk ) ;
2022-04-06 02:35:38 +03:00
return max ( ( tcp_snd_cwnd ( tp ) * ca - > beta ) > > 7 , 2U ) ;
2005-06-23 23:28:11 +04:00
}
/* .cong_avoid hook: Reno slow start below ssthresh, then grow cwnd by
 * roughly alpha/cwnd per ACK, using snd_cwnd_cnt as the fractional
 * accumulator.
 */
static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct htcp *ca = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp))
		tcp_slow_start(tp, acked);
	else {
		/* In dangerous area, increase slowly.
		 * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
		 */
		if ((tp->snd_cwnd_cnt * ca->alpha) >> 7 >= tcp_snd_cwnd(tp)) {
			if (tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp)
				tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
			tp->snd_cwnd_cnt = 0;
			/* epoch time has advanced, so alpha may have grown */
			htcp_alpha_update(ca);
		} else
			tp->snd_cwnd_cnt += ca->pkts_acked;

		ca->pkts_acked = 1;
	}
}
2005-08-10 11:03:31 +04:00
static void htcp_init ( struct sock * sk )
2005-06-23 23:28:11 +04:00
{
2005-08-10 11:03:31 +04:00
struct htcp * ca = inet_csk_ca ( sk ) ;
2005-06-23 23:28:11 +04:00
memset ( ca , 0 , sizeof ( struct htcp ) ) ;
ca - > alpha = ALPHA_BASE ;
ca - > beta = BETA_MIN ;
2006-03-21 09:22:47 +03:00
ca - > pkts_acked = 1 ;
2006-03-21 09:23:10 +03:00
ca - > last_cong = jiffies ;
2005-06-23 23:28:11 +04:00
}
/* .set_state hook: snapshot undo state on entering a congestion state,
 * and restart the epoch clock when returning to TCP_CA_Open with an
 * unconsumed snapshot (i.e. the backoff was real, not undone).
 */
static void htcp_state(struct sock *sk, u8 new_state)
{
	switch (new_state) {
	case TCP_CA_Open:
		{
			struct htcp *ca = inet_csk_ca(sk);

			if (ca->undo_last_cong) {
				ca->last_cong = jiffies;
				ca->undo_last_cong = 0;
			}
		}
		break;
	case TCP_CA_CWR:
	case TCP_CA_Recovery:
	case TCP_CA_Loss:
		htcp_reset(inet_csk_ca(sk));
		break;
	}
}
/* Congestion-control operations registered with the TCP stack. */
static struct tcp_congestion_ops htcp __read_mostly = {
	.init		= htcp_init,
	.ssthresh	= htcp_recalc_ssthresh,
	.cong_avoid	= htcp_cong_avoid,
	.set_state	= htcp_state,
	.undo_cwnd	= htcp_cwnd_undo,
	.pkts_acked	= measure_achieved_throughput,
	.owner		= THIS_MODULE,
	.name		= "htcp",
};
static int __init htcp_register(void)
{
	/* struct htcp must fit in the inet_csk CA private area. */
	BUILD_BUG_ON(sizeof(struct htcp) > ICSK_CA_PRIV_SIZE);
	BUILD_BUG_ON(BETA_MIN >= BETA_MAX);
	return tcp_register_congestion_control(&htcp);
}
static void __exit htcp_unregister(void)
{
	tcp_unregister_congestion_control(&htcp);
}

module_init(htcp_register);
module_exit(htcp_unregister);

MODULE_AUTHOR("Baruch Even");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("H-TCP");