// SPDX-License-Identifier: GPL-2.0-only
/*
 * TCP Veno congestion control
 *
 * This is based on the congestion detection/avoidance scheme described in
 *    C. P. Fu, S. C. Liew.
 *    "TCP Veno: TCP Enhancement for Transmission over Wireless Access Networks."
 *    IEEE Journal on Selected Areas in Communication,
 *    Feb. 2003.
 * See https://www.ie.cuhk.edu.hk/fileadmin/staff_upload/soung/Journal/J3.pdf
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>

#include <net/tcp.h>

/* Default values of the Veno variables, in fixed-point representation
 * with V_PARAM_SHIFT bits to the right of the binary point.
 */
#define V_PARAM_SHIFT 1
static const int beta = 3 << V_PARAM_SHIFT;
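
/* With V_PARAM_SHIFT fractional bits, beta corresponds to a backlog of
 * 3 packets: diff < beta is treated as the "non-congestive" state,
 * diff >= beta as the "congestive" state.
 */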

/* Veno variables */
struct veno {
	u8 doing_veno_now;	/* if true, do veno for this rtt */
	u16 cntrtt;		/* # of rtts measured within last rtt */
	u32 minrtt;		/* min of rtts measured within last rtt (in usec) */
	u32 basertt;		/* the min of all Veno rtt measurements seen (in usec) */
	u32 inc;		/* decide whether to increase cwnd */
	u32 diff;		/* calculate the diff rate */
};

/* There are several situations when we must "re-start" Veno:
 *
 *  o when a connection is established
 *  o after an RTO
 *  o after fast recovery
 *  o when we send a packet and there is no outstanding
 *    unacknowledged data (restarting an idle connection)
 *
 */
static inline void veno_enable(struct sock *sk)
{
	struct veno *veno = inet_csk_ca(sk);

	/* turn on Veno */
	veno->doing_veno_now = 1;

	veno->minrtt = 0x7fffffff;
}

static inline void veno_disable(struct sock *sk)
{
	struct veno *veno = inet_csk_ca(sk);

	/* turn off Veno */
	veno->doing_veno_now = 0;
}

static void tcp_veno_init(struct sock *sk)
{
	struct veno *veno = inet_csk_ca(sk);

	veno->basertt = 0x7fffffff;
	veno->inc = 1;
	veno_enable(sk);
}

/* Do rtt sampling needed for Veno. */
static void tcp_veno_pkts_acked(struct sock *sk,
				const struct ack_sample *sample)
{
	struct veno *veno = inet_csk_ca(sk);
	u32 vrtt;

	if (sample->rtt_us < 0)
		return;

	/* Never allow zero rtt or baseRTT */
	vrtt = sample->rtt_us + 1;

	/* Filter to find propagation delay: */
	if (vrtt < veno->basertt)
		veno->basertt = vrtt;

	/* Find the min rtt during the last rtt to find
	 * the current prop. delay + queuing delay:
	 */
	veno->minrtt = min(veno->minrtt, vrtt);
	veno->cntrtt++;
}

static void tcp_veno_state(struct sock *sk, u8 ca_state)
{
	if (ca_state == TCP_CA_Open)
		veno_enable(sk);
	else
		veno_disable(sk);
}

/*
 * If the connection is idle and we are restarting,
 * then we don't want to do any Veno calculations
 * until we get fresh rtt samples.  So when we
 * restart, we reset our Veno state to a clean
 * state.  After we get acks for this flight of
 * packets, _then_ we can make Veno calculations
 * again.
 */
static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
	if (event == CA_EVENT_CWND_RESTART || event == CA_EVENT_TX_START)
		tcp_veno_init(sk);
}

static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct veno *veno = inet_csk_ca(sk);

	if (!veno->doing_veno_now) {
		tcp_reno_cong_avoid(sk, ack, acked);
		return;
	}

	/* limited by applications */
	if (!tcp_is_cwnd_limited(sk))
		return;

	/* We do the Veno calculations only if we got enough rtt samples */
	if (veno->cntrtt <= 2) {
		/* We don't have enough rtt samples to do the Veno
		 * calculation, so we'll behave like Reno.
		 */
		tcp_reno_cong_avoid(sk, ack, acked);
	} else {
		u64 target_cwnd;
		u32 rtt;

		/* We have enough rtt samples, so, using the Veno
		 * algorithm, we determine the state of the network.
		 */
		rtt = veno->minrtt;
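		/* Estimate this flow's backlog in the network:
		 * target_cwnd is what cwnd would be if only the
		 * propagation delay (basertt) were seen, so
		 * diff ~= cwnd * (rtt - basertt) / rtt is the number
		 * of packets queued along the path, held in fixed
		 * point with V_PARAM_SHIFT fractional bits.
		 */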
		target_cwnd = (u64)tcp_snd_cwnd(tp) * veno->basertt;
		target_cwnd <<= V_PARAM_SHIFT;
		do_div(target_cwnd, rtt);

		veno->diff = (tcp_snd_cwnd(tp) << V_PARAM_SHIFT) - target_cwnd;

		if (tcp_in_slow_start(tp)) {
			/* Slow start. */
			acked = tcp_slow_start(tp, acked);
			if (!acked)
				goto done;
		}

		/* Congestion avoidance. */
		if (veno->diff < beta) {
			/* In the "non-congestive state", increase cwnd
			 * every rtt.
			 */
			tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
		} else {
			/* In the "congestive state", increase cwnd
			 * every other rtt.
			 */
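			/* veno->inc toggles each time a full cwnd's worth
			 * of acks has arrived, so the window grows by one
			 * segment only on alternate rtts while the backlog
			 * stays at or above beta.
			 */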
			if (tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) {
				if (veno->inc &&
				    tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) {
					tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
					veno->inc = 0;
				} else
					veno->inc = 1;
				tp->snd_cwnd_cnt = 0;
			} else
				tp->snd_cwnd_cnt += acked;
		}
done:
		if (tcp_snd_cwnd(tp) < 2)
			tcp_snd_cwnd_set(tp, 2);
		else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp)
			tcp_snd_cwnd_set(tp, tp->snd_cwnd_clamp);
	}
	/* Wipe the slate clean for the next rtt. */
	/* veno->cntrtt = 0; */
	veno->minrtt = 0x7fffffff;
}

/* Veno MD phase */
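/* On a loss in the non-congestive state the drop is most likely random
 * (e.g. wireless corruption) rather than a sign of congestion, so back
 * off only slightly; otherwise apply the usual halving.
 */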
static u32 tcp_veno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct veno *veno = inet_csk_ca(sk);

	if (veno->diff < beta)
		/* in "non-congestive state", cut cwnd by 1/5 */
		return max(tcp_snd_cwnd(tp) * 4 / 5, 2U);
	else
		/* in "congestive state", cut cwnd by 1/2 */
		return max(tcp_snd_cwnd(tp) >> 1U, 2U);
}

static struct tcp_congestion_ops tcp_veno __read_mostly = {
	.init		= tcp_veno_init,
	.ssthresh	= tcp_veno_ssthresh,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cong_avoid	= tcp_veno_cong_avoid,
	.pkts_acked	= tcp_veno_pkts_acked,
	.set_state	= tcp_veno_state,
	.cwnd_event	= tcp_veno_cwnd_event,

	.owner		= THIS_MODULE,
	.name		= "veno",
};

static int __init tcp_veno_register(void)
{
	BUILD_BUG_ON(sizeof(struct veno) > ICSK_CA_PRIV_SIZE);
	tcp_register_congestion_control(&tcp_veno);
	return 0;
}

static void __exit tcp_veno_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_veno);
}

module_init(tcp_veno_register);
module_exit(tcp_veno_unregister);

MODULE_AUTHOR("Bin Zhou, Cheng Peng Fu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Veno");