tcp: add max_quickacks param to tcp_incr_quickack and tcp_enter_quickack_mode

[ Upstream commit 9a9c9b51e54618861420093ae6e9b50a961914c5 ]

We want to add finer control of the number of ACK packets sent after
ECN events. This patch does not change current behavior; it only
enables the following change.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 1c005489fa (parent 5a1baf1944)
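
For orientation before the diff: the patch threads a new max_quickacks
argument through the quickack helpers. The following standalone sketch is
not kernel code — quickack_budget() is a hypothetical name, the window and
MSS values are illustrative, and the icsk_ack.quick bookkeeping is omitted —
but it mirrors the reworked arithmetic (TCP_MAX_QUICKACKS is 16U in this
tree):

#include <stdio.h>

#define TCP_MAX_QUICKACKS 16U   /* cap passed by most call sites */

/* Mirror of the reworked tcp_incr_quickack() arithmetic: the budget is
 * still derived from the receive window, but is now clamped to a
 * caller-supplied max_quickacks instead of a hard-coded constant.
 */
static unsigned int quickack_budget(unsigned int rcv_wnd,
                                    unsigned int rcv_mss,
                                    unsigned int max_quickacks)
{
        unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

        if (quickacks == 0)
                quickacks = 2;
        return quickacks < max_quickacks ? quickacks : max_quickacks;
}

int main(void)
{
        /* Converted call sites pass TCP_MAX_QUICKACKS, preserving today's
         * behavior; DCTCP passes 1 to request a single immediate ACK.
         */
        printf("%u\n", quickack_budget(65535, 1460, TCP_MAX_QUICKACKS)); /* 16 */
        printf("%u\n", quickack_budget(65535, 1460, 1));                 /* 1  */
        return 0;
}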
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -372,7 +372,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 			struct pipe_inode_info *pipe, size_t len,
 			unsigned int flags);
 
-void tcp_enter_quickack_mode(struct sock *sk);
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
 static inline void tcp_dec_quickack_mode(struct sock *sk,
 					 const unsigned int pkts)
 {
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -138,7 +138,7 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
 		 */
 		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
 			__tcp_send_ack(sk, ca->prior_rcv_nxt);
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, 1);
 	}
 
 	ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -159,7 +159,7 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
 		 */
 		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
 			__tcp_send_ack(sk, ca->prior_rcv_nxt);
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, 1);
 	}
 
 	ca->prior_rcv_nxt = tp->rcv_nxt;
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -198,21 +198,23 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
 	}
 }
 
-static void tcp_incr_quickack(struct sock *sk)
+static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
 
 	if (quickacks == 0)
 		quickacks = 2;
+	quickacks = min(quickacks, max_quickacks);
 	if (quickacks > icsk->icsk_ack.quick)
-		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
+		icsk->icsk_ack.quick = quickacks;
 }
 
-void tcp_enter_quickack_mode(struct sock *sk)
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	tcp_incr_quickack(sk);
+
+	tcp_incr_quickack(sk, max_quickacks);
 	icsk->icsk_ack.pingpong = 0;
 	icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
@@ -257,7 +259,7 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
 		 * it is probably a retransmit.
 		 */
 		if (tp->ecn_flags & TCP_ECN_SEEN)
-			tcp_enter_quickack_mode((struct sock *)tp);
+			tcp_enter_quickack_mode((struct sock *)tp, TCP_MAX_QUICKACKS);
 		break;
 	case INET_ECN_CE:
 		if (tcp_ca_needs_ecn((struct sock *)tp))
@@ -265,7 +267,7 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
 
 		if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
 			/* Better not delay acks, sender can have a very low cwnd */
-			tcp_enter_quickack_mode((struct sock *)tp);
+			tcp_enter_quickack_mode((struct sock *)tp, TCP_MAX_QUICKACKS);
 			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
 		}
 		tp->ecn_flags |= TCP_ECN_SEEN;
@@ -686,7 +688,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 		/* The _first_ data packet received, initialize
 		 * delayed ACK engine.
 		 */
-		tcp_incr_quickack(sk);
+		tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
 		icsk->icsk_ack.ato = TCP_ATO_MIN;
 	} else {
 		int m = now - icsk->icsk_ack.lrcvtime;
@@ -702,7 +704,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 			/* Too long gap. Apparently sender failed to
 			 * restart window, so that we send ACKs quickly.
 			 */
-			tcp_incr_quickack(sk);
+			tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
 			sk_mem_reclaim(sk);
 		}
 	}
@@ -4160,7 +4162,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 
 		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
 			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -4710,7 +4712,7 @@ queue_and_out:
 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 		inet_csk_schedule_ack(sk);
 drop:
 		tcp_drop(sk, skb);
@@ -5791,7 +5793,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		 * to stand against the temptation 8)     --ANK
 		 */
 		inet_csk_schedule_ack(sk);
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 					  TCP_DELACK_MAX, TCP_RTO_MAX);
 
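
As the changelog says, behavior is preserved as long as callers pass
TCP_MAX_QUICKACKS: clamping quickacks before the comparison against
icsk_ack.quick is equivalent to the old min() at assignment time when the
cap is the same. A quick standalone check of that equivalence (hypothetical
helper names, not kernel code):

#include <assert.h>
#include <stdio.h>

#define TCP_MAX_QUICKACKS 16U

/* Old logic: clamp to the hard-coded cap when storing icsk_ack.quick. */
static unsigned int quick_old(unsigned int quickacks)
{
        return quickacks < TCP_MAX_QUICKACKS ? quickacks : TCP_MAX_QUICKACKS;
}

/* New logic: clamp to the caller-supplied max before storing. */
static unsigned int quick_new(unsigned int quickacks, unsigned int max_quickacks)
{
        return quickacks < max_quickacks ? quickacks : max_quickacks;
}

int main(void)
{
        /* With max_quickacks == TCP_MAX_QUICKACKS (every converted call
         * site except DCTCP), both computations agree for any budget.
         */
        for (unsigned int q = 0; q < 64; q++)
                assert(quick_old(q) == quick_new(q, TCP_MAX_QUICKACKS));
        puts("old and new clamping agree when max == TCP_MAX_QUICKACKS");
        return 0;
}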