From b6b6d6533a14b5ddcf9f9c5239fc3721fc6beda0 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Wed, 30 Sep 2020 05:54:56 -0700
Subject: [PATCH 1/2] inet: remove icsk_ack.blocked

TCP has been using it to work around the possibility of tcp_delack_timer()
finding the socket owned by the user.

After commit 6f458dfb4092 ("tcp: improve latencies of timer triggered
events") we added the TCP_DELACK_TIMER_DEFERRED atomic bit for more
immediate recovery, so we can get rid of icsk_ack.blocked.

This frees space that the following patch will reuse.

Signed-off-by: Eric Dumazet
Acked-by: Soheil Hassas Yeganeh
Signed-off-by: David S. Miller
---
 include/net/inet_connection_sock.h | 4 ++--
 net/dccp/timer.c                   | 1 -
 net/ipv4/inet_connection_sock.c    | 2 +-
 net/ipv4/tcp.c                     | 6 ++----
 net/ipv4/tcp_output.c              | 7 ++-----
 net/ipv4/tcp_timer.c               | 1 -
 6 files changed, 7 insertions(+), 14 deletions(-)

diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index dc763ca9413c..79875f976190 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -110,7 +110,7 @@ struct inet_connection_sock {
 		__u8		  pending;	 /* ACK is pending			   */
 		__u8		  quick;	 /* Scheduled number of quick acks	   */
 		__u8		  pingpong;	 /* The session is interactive		   */
-		__u8		  blocked;	 /* Delayed ACK was blocked by socket lock */
+		/* one byte hole. */
 		__u32		  ato;		 /* Predicted tick of soft clock	   */
 		unsigned long	  timeout;	 /* Currently scheduled timeout		   */
 		__u32		  lrcvtime;	 /* timestamp of last received data packet */
@@ -198,7 +198,7 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
 		sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
 #endif
 	} else if (what == ICSK_TIME_DACK) {
-		icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
+		icsk->icsk_ack.pending = 0;
 #ifdef INET_CSK_CLEAR_TIMERS
 		sk_stop_timer(sk, &icsk->icsk_delack_timer);
 #endif
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 927c796d7682..a934d2932373 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -176,7 +176,6 @@ static void dccp_delack_timer(struct timer_list *t)
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
 		/* Try again later. */
-		icsk->icsk_ack.blocked = 1;
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		sk_reset_timer(sk, &icsk->icsk_delack_timer,
 			       jiffies + TCP_DELACK_MIN);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index b457dd2d6c75..4148f5f78f31 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -564,7 +564,7 @@ void inet_csk_clear_xmit_timers(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
-	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;
+	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
 
 	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
 	sk_stop_timer(sk, &icsk->icsk_delack_timer);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2a8bfa89a515..ed2805564424 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1538,10 +1538,8 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 
 	if (inet_csk_ack_scheduled(sk)) {
 		const struct inet_connection_sock *icsk = inet_csk(sk);
-		   /* Delayed ACKs frequently hit locked sockets during bulk
-		    * receive. */
-		if (icsk->icsk_ack.blocked ||
-		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
+
+		if (/* Once-per-two-segments ACK was not sent by tcp_input.c */
 		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
 		    /*
 		     * If this read emptied read buffer, we send ACK, if
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 386978dcd318..6bd4e383030e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3911,11 +3911,8 @@ void tcp_send_delayed_ack(struct sock *sk)
 
 	/* Use new timeout only if there wasn't a older one earlier. */
 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
-		/* If delack timer was blocked or is about to expire,
-		 * send ACK now.
-		 */
-		if (icsk->icsk_ack.blocked ||
-		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
+		/* If delack timer is about to expire, send ACK now. */
+		if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
 			tcp_send_ack(sk);
 			return;
 		}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 0c08c420fbc2..6c62b9ea1320 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -331,7 +331,6 @@ static void tcp_delack_timer(struct timer_list *t)
 	if (!sock_owned_by_user(sk)) {
 		tcp_delack_timer_handler(sk);
 	} else {
-		icsk->icsk_ack.blocked = 1;
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		/* deleguate our work to tcp_release_cb() */
 		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))

From a37c2134bed6f28c6d6aefa2699331e6e4e9c4f1 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Wed, 30 Sep 2020 05:54:57 -0700
Subject: [PATCH 2/2] tcp: add exponential backoff in __tcp_send_ack()

Whenever the host is under very high memory pressure, __tcp_send_ack()
skb allocation fails, and we set up a 200 ms (TCP_DELACK_MAX) timer
before retrying.

On hosts with a high number of TCP sockets, we can spend a considerable
amount of CPU cycles in these attempts, adding high pressure on various
spinlocks in the mm layer and ultimately blocking threads attempting to
free space from making any progress.

This patch adds standard exponential backoff to avoid adding fuel to
the fire.

Signed-off-by: Eric Dumazet
Acked-by: Soheil Hassas Yeganeh
Signed-off-by: David S. Miller
---
 include/net/inet_connection_sock.h |  3 ++-
 net/ipv4/tcp_output.c              | 11 ++++++++---
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 79875f976190..7338b3865a2a 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -110,7 +110,7 @@ struct inet_connection_sock {
 		__u8		  pending;	 /* ACK is pending			   */
 		__u8		  quick;	 /* Scheduled number of quick acks	   */
 		__u8		  pingpong;	 /* The session is interactive		   */
-		/* one byte hole. */
+		__u8		  retry;	 /* Number of attempts			   */
 		__u32		  ato;		 /* Predicted tick of soft clock	   */
 		unsigned long	  timeout;	 /* Currently scheduled timeout		   */
 		__u32		  lrcvtime;	 /* timestamp of last received data packet */
@@ -199,6 +199,7 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
 #endif
 	} else if (what == ICSK_TIME_DACK) {
 		icsk->icsk_ack.pending = 0;
+		icsk->icsk_ack.retry = 0;
 #ifdef INET_CSK_CLEAR_TIMERS
 		sk_stop_timer(sk, &icsk->icsk_delack_timer);
 #endif
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 6bd4e383030e..bf48cd73e967 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3941,10 +3941,15 @@ void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
 	buff = alloc_skb(MAX_TCP_HEADER,
 			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
 	if (unlikely(!buff)) {
+		struct inet_connection_sock *icsk = inet_csk(sk);
+		unsigned long delay;
+
+		delay = TCP_DELACK_MAX << icsk->icsk_ack.retry;
+		if (delay < TCP_RTO_MAX)
+			icsk->icsk_ack.retry++;
 		inet_csk_schedule_ack(sk);
-		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
-		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-					  TCP_DELACK_MAX, TCP_RTO_MAX);
+		icsk->icsk_ack.ato = TCP_ATO_MIN;
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, TCP_RTO_MAX);
 		return;
 	}
 
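
Editorial note, not part of either patch: the following is a minimal userspace
sketch of the backoff schedule patch 2 produces when alloc_skb() keeps failing.
It mirrors the logic of the new error path in __tcp_send_ack(); the constants
assume the usual definitions in include/net/tcp.h (TCP_DELACK_MAX = HZ/5,
TCP_RTO_MAX = 120*HZ) and HZ = 1000, so verify them against your tree.

/* Standalone sketch of the capped exponential backoff, not kernel code. */
#include <stdio.h>

#define HZ		1000U
#define TCP_DELACK_MAX	(HZ / 5)	/* 200 ms at HZ = 1000 */
#define TCP_RTO_MAX	(120U * HZ)	/* 120 s */

int main(void)
{
	unsigned int retry = 0;		/* mirrors icsk->icsk_ack.retry */

	for (int attempt = 1; attempt <= 12; attempt++) {
		unsigned long delay = TCP_DELACK_MAX << retry;

		if (delay < TCP_RTO_MAX)	/* retry stops growing once the cap is hit */
			retry++;
		if (delay > TCP_RTO_MAX)	/* inet_csk_reset_xmit_timer() clamps at TCP_RTO_MAX */
			delay = TCP_RTO_MAX;

		printf("attempt %2d: re-arm delack timer in %6lu ms\n",
		       attempt, delay * 1000UL / HZ);
	}
	return 0;
}

With those values the delayed-ACK timer is re-armed after 200, 400, 800, ... ms,
roughly doubling per failed allocation until it saturates at TCP_RTO_MAX (120 s),
instead of firing every 200 ms as it did before patch 2.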