tcp: make global challenge ack rate limitation per net-ns and default disabled

Because per host rate limiting has been proven problematic (side channel
attacks can be based on it), per host rate limiting of challenge acks ideally
should be per netns and turned off by default.

This is a long-due followup of the following commits:

083ae30828 ("tcp: enable per-socket rate limiting of all 'challenge acks'")
f2b2c582e8 ("tcp: mitigate ACK loops for connections as tcp_sock")
75ff39ccc1 ("tcp: make challenge acks less predictable")

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Jason Baron <jbaron@akamai.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

parent 8c70521238
commit 79e3602caa
Documentation/networking/ip-sysctl.rst:

@@ -1035,7 +1035,10 @@ tcp_limit_output_bytes - INTEGER
 tcp_challenge_ack_limit - INTEGER
        Limits number of Challenge ACK sent per second, as recommended
        in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks)
-       Default: 1000
+       Note that this per netns rate limit can allow some side channel
+       attacks and probably should not be enabled.
+       TCP stack implements per TCP socket limits anyway.
+       Default: INT_MAX (unlimited)

 UDP variables
 =============
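For namespaces that still want the host-wide behaviour, the limit can be re-enabled by writing a finite value back to the sysctl. The sketch below is illustrative only: it assumes the standard /proc/sys layout, that it runs inside the target network namespace, and that the process may write net sysctls (typically root). The value 1000 simply restores the previous global default.

/* Illustrative sketch: re-enable the per-netns challenge ACK limit
 * at the old default of 1000/s by writing the sysctl. Assumes the
 * process already runs inside the target network namespace and has
 * permission to write /proc/sys (typically root).
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *path = "/proc/sys/net/ipv4/tcp_challenge_ack_limit";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return EXIT_FAILURE;
        }
        fprintf(f, "1000\n");
        if (fclose(f) != 0) {
                perror("fclose");
                return EXIT_FAILURE;
        }
        return EXIT_SUCCESS;
}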
include/net/netns/ipv4.h:

@@ -179,6 +179,8 @@ struct netns_ipv4 {
        unsigned int sysctl_tcp_fastopen_blackhole_timeout;
        atomic_t tfo_active_disable_times;
        unsigned long tfo_active_disable_stamp;
+       u32 tcp_challenge_timestamp;
+       u32 tcp_challenge_count;

        int sysctl_udp_wmem_min;
        int sysctl_udp_rmem_min;
net/ipv4/tcp_input.c:

@@ -3614,12 +3614,9 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
 /* RFC 5961 7 [ACK Throttling] */
 static void tcp_send_challenge_ack(struct sock *sk)
 {
-       /* unprotected vars, we dont care of overwrites */
-       static u32 challenge_timestamp;
-       static unsigned int challenge_count;
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
-       u32 count, now;
+       u32 count, now, ack_limit;

        /* First check our per-socket dupack rate limit. */
        if (__tcp_oow_rate_limited(net,
@@ -3627,18 +3624,22 @@ static void tcp_send_challenge_ack(struct sock *sk)
                                   &tp->last_oow_ack_time))
                return;

+       ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
+       if (ack_limit == INT_MAX)
+               goto send_ack;
+
        /* Then check host-wide RFC 5961 rate limit. */
        now = jiffies / HZ;
-       if (now != READ_ONCE(challenge_timestamp)) {
-               u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
+       if (now != READ_ONCE(net->ipv4.tcp_challenge_timestamp)) {
                u32 half = (ack_limit + 1) >> 1;

-               WRITE_ONCE(challenge_timestamp, now);
-               WRITE_ONCE(challenge_count, half + prandom_u32_max(ack_limit));
+               WRITE_ONCE(net->ipv4.tcp_challenge_timestamp, now);
+               WRITE_ONCE(net->ipv4.tcp_challenge_count, half + prandom_u32_max(ack_limit));
        }
-       count = READ_ONCE(challenge_count);
+       count = READ_ONCE(net->ipv4.tcp_challenge_count);
        if (count > 0) {
-               WRITE_ONCE(challenge_count, count - 1);
+               WRITE_ONCE(net->ipv4.tcp_challenge_count, count - 1);
+send_ack:
                NET_INC_STATS(net, LINUX_MIB_TCPCHALLENGEACK);
                tcp_send_ack(sk);
        }
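The throttling scheme itself is unchanged: once per second the budget is refilled to a randomized value between limit/2 and limit/2 + limit, and each challenge ACK spends one token; only the state moves from function-static variables into struct netns_ipv4. Below is a rough userspace sketch of the same idea, using hypothetical names (challenge_state, challenge_ack_allowed, rand_below) and plain rand() in place of prandom_u32_max(); it is not the kernel code itself.

/* Userspace sketch of the per-namespace challenge ACK budget: once
 * per second the budget is refilled to half + rand_below(limit),
 * i.e. a value in [limit/2, limit/2 + limit), and each challenge ACK
 * consumes one token. Names and helpers here are hypothetical.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct challenge_state {                /* mirrors the new netns fields */
        unsigned int timestamp;         /* last refill, in seconds */
        unsigned int count;             /* remaining budget */
};

static unsigned int rand_below(unsigned int n)
{
        return (unsigned int)rand() % n;        /* stand-in for prandom_u32_max() */
}

static bool challenge_ack_allowed(struct challenge_state *st, int limit)
{
        unsigned int now = (unsigned int)time(NULL);

        if (limit == INT_MAX)           /* limit disabled: always send */
                return true;

        if (now != st->timestamp) {     /* new one-second window: refill */
                unsigned int half = ((unsigned int)limit + 1) / 2;

                st->timestamp = now;
                st->count = half + rand_below((unsigned int)limit);
        }
        if (st->count > 0) {            /* spend one token per challenge ACK */
                st->count--;
                return true;
        }
        return false;
}

int main(void)
{
        struct challenge_state st = { 0, 0 };
        int sent = 0;

        srand((unsigned int)time(NULL));
        for (int i = 0; i < 5000; i++)  /* burst within one second */
                sent += challenge_ack_allowed(&st, 1000);
        printf("sent %d challenge ACKs out of 5000 attempts\n", sent);
        return 0;
}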
net/ipv4/tcp_ipv4.c:

@@ -3139,8 +3139,10 @@ static int __net_init tcp_sk_init(struct net *net)
        net->ipv4.sysctl_tcp_tso_win_divisor = 3;
        /* Default TSQ limit of 16 TSO segments */
        net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
-       /* rfc5961 challenge ack rate limiting */
-       net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
+
+       /* rfc5961 challenge ack rate limiting, per net-ns, disabled by default. */
+       net->ipv4.sysctl_tcp_challenge_ack_limit = INT_MAX;
+
        net->ipv4.sysctl_tcp_min_tso_segs = 2;
        net->ipv4.sysctl_tcp_tso_rtt_log = 9; /* 2^9 = 512 usec */
        net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
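Because tcp_sk_init() runs each time a network namespace is created, every new namespace now starts with the limit effectively disabled (INT_MAX). One way to observe the per-netns default, sketched below assuming sufficient privileges to unshare a network namespace and a mounted /proc, is to enter a fresh namespace and read the sysctl back.

/* Sketch: create a new network namespace and read its independent
 * tcp_challenge_ack_limit, which should report 2147483647 (INT_MAX)
 * on kernels with this patch. Requires privileges for CLONE_NEWNET.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        char buf[32];
        FILE *f;

        if (unshare(CLONE_NEWNET) != 0) {       /* enter a fresh netns */
                perror("unshare(CLONE_NEWNET)");
                return EXIT_FAILURE;
        }
        /* /proc/sys/net reflects the network namespace of the opener. */
        f = fopen("/proc/sys/net/ipv4/tcp_challenge_ack_limit", "r");
        if (!f || !fgets(buf, sizeof(buf), f)) {
                perror("tcp_challenge_ack_limit");
                return EXIT_FAILURE;
        }
        printf("new netns default: %s", buf);
        fclose(f);
        return EXIT_SUCCESS;
}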