tcp: Fix data-races around some timeout sysctl knobs.

[ Upstream commit 39e24435a776e9de5c6dd188836cf2523547804b ]

While reading these sysctl knobs, they can be changed concurrently.
Thus, we need to add READ_ONCE() to their readers.

  - tcp_retries1
  - tcp_retries2
  - tcp_orphan_retries
  - tcp_fin_timeout

Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
Kuniyuki Iwashima 2022-07-15 10:17:50 -07:00 committed by Greg Kroah-Hartman
parent 59ce170d33
commit f197442a0e
4 changed files with 9 additions and 8 deletions

View File

@@ -1430,7 +1430,8 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
 static inline int tcp_fin_time(const struct sock *sk)
 {
-	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
+	int fin_timeout = tcp_sk(sk)->linger2 ? :
+			  READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
 	const int rto = inet_csk(sk)->icsk_rto;
 
 	if (fin_timeout < (rto << 2) - (rto >> 1))

View File

@@ -3386,7 +3386,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 	case TCP_LINGER2:
 		val = tp->linger2;
 		if (val >= 0)
-			val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ;
+			val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ;
 		break;
 	case TCP_DEFER_ACCEPT:
 		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,

View File

@@ -3776,7 +3776,7 @@ void tcp_send_probe0(struct sock *sk)
 	}
 	if (err <= 0) {
-		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
+		if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2))
 			icsk->icsk_backoff++;
 		icsk->icsk_probes_out++;
 		probe_max = TCP_RTO_MAX;

View File

@@ -124,7 +124,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
  */
 static int tcp_orphan_retries(struct sock *sk, bool alive)
 {
-	int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */
+	int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */
 
 	/* We know from an ICMP that something is wrong. */
 	if (sk->sk_err_soft && !alive)
@@ -226,7 +226,7 @@ static int tcp_write_timeout(struct sock *sk)
 		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
 		expired = icsk->icsk_retransmits >= retry_until;
 	} else {
-		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
+		if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
 			/* Black hole detection */
 			tcp_mtu_probing(icsk, sk);
@@ -235,7 +235,7 @@ static int tcp_write_timeout(struct sock *sk)
 			sk_rethink_txhash(sk);
 		}
 
-		retry_until = net->ipv4.sysctl_tcp_retries2;
+		retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
 		if (sock_flag(sk, SOCK_DEAD)) {
 			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
@@ -362,7 +362,7 @@ static void tcp_probe_timer(struct sock *sk)
 	    (s32)(tcp_time_stamp(tp) - start_ts) > icsk->icsk_user_timeout)
 		goto abort;
 
-	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
+	max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
 	if (sock_flag(sk, SOCK_DEAD)) {
 		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
@@ -556,7 +556,7 @@ out_reset_timer:
 	}
 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 				  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
-	if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
+	if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
 		__sk_dst_reset(sk);
 
 out:;