From 0a6b2a1dc2a2105f178255fe495eb914b09cb37a Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Mon, 19 Feb 2018 11:56:47 -0800
Subject: [PATCH 1/6] tcp: switch to GSO being always on

Oleksandr Natalenko reported performance issues with BBR without FQ
packet scheduler that were root caused to lack of SG and GSO/TSO on
his configuration. In this mode, TCP internal pacing has to set up a
high-resolution timer for each MSS sent.

We could implement in TCP a strategy similar to the one adopted in
commit fefa569a9d4b ("net_sched: sch_fq: account for schedule/timers
drifts") or decide to finally switch the TCP stack to a GSO-only mode.

This has many benefits:

1) Most TCP developments are done with TSO in mind.
2) Fewer high-resolution timers need to be armed for TCP pacing.
3) GSO can benefit from the xmit_more hint.
4) Receiver GRO is more effective (as if TSO was used for real on the
   sender) -> lower ACK traffic.
5) Write queues have less overhead (one skb holds about 64KB of payload).
6) SACK coalescing just works.
7) The rtx rb-tree contains fewer packets, so SACK processing is cheaper.

This patch implements the minimal change; we can remove some legacy
code as follow-ups.

Tested: on a 40Gbit link, one netperf -t TCP_STREAM

BBR+fq:
sg on:  26 Gbits/sec
sg off: 15.7 Gbits/sec  (was 2.3 Gbit before patch)

BBR+pfifo_fast:
sg on:  24.2 Gbits/sec
sg off: 14.9 Gbits/sec  (was 0.66 Gbit before patch !!!)

BBR+fq_codel:
sg on:  24.4 Gbits/sec
sg off: 15 Gbits/sec    (was 0.66 Gbit before patch !!!)

Signed-off-by: Eric Dumazet
Reported-by: Oleksandr Natalenko
Signed-off-by: David S. Miller
---
 include/net/sock.h | 1 +
 net/core/sock.c    | 2 +-
 net/ipv4/tcp.c     | 1 +
 3 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index 3aa7b7d6e6c7..f0f576ff5603 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -417,6 +417,7 @@ struct sock {
         struct page_frag        sk_frag;
         netdev_features_t       sk_route_caps;
         netdev_features_t       sk_route_nocaps;
+        netdev_features_t       sk_route_forced_caps;
         int                     sk_gso_type;
         unsigned int            sk_gso_max_size;
         gfp_t                   sk_allocation;
diff --git a/net/core/sock.c b/net/core/sock.c
index a1fa4a548f1b..507d8c6c4319 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1777,7 +1777,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
         u32 max_segs = 1;
 
         sk_dst_set(sk, dst);
-        sk->sk_route_caps = dst->dev->features;
+        sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
         if (sk->sk_route_caps & NETIF_F_GSO)
                 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
         sk->sk_route_caps &= ~sk->sk_route_nocaps;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 48636aee23c3..4b46a2ae46e3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -453,6 +453,7 @@ void tcp_init_sock(struct sock *sk)
         sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
 
         sk_sockets_allocated_inc(sk);
+        sk->sk_route_forced_caps = NETIF_F_GSO;
 }
 EXPORT_SYMBOL(tcp_init_sock);
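The heart of the series is the one-line mask change in sk_setup_caps():
the new sk_route_forced_caps bits are OR'ed into sk_route_caps, so once
tcp_init_sock() forces NETIF_F_GSO there, the existing check upgrades
the socket to software GSO even on a device that advertises no offloads.
A minimal userspace sketch of that composition — the feature bits below
are illustrative stand-ins, not the real netdev_features_t values:

/* Userspace sketch of the sk_setup_caps() mask logic after this patch.
 * Feature bits are illustrative stand-ins for the kernel constants. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t features_t;

#define F_SG            (1ULL << 0)   /* stand-in for NETIF_F_SG */
#define F_GSO           (1ULL << 1)   /* stand-in for NETIF_F_GSO */
#define F_GSO_SOFTWARE  (1ULL << 2)   /* stand-in for NETIF_F_GSO_SOFTWARE */

static features_t setup_caps(features_t dev_features, features_t forced_caps,
                             features_t nocaps)
{
        /* The one-line change: forced caps survive even when the
         * device itself advertises nothing. */
        features_t caps = dev_features | forced_caps;

        if (caps & F_GSO)
                caps |= F_GSO_SOFTWARE;
        return caps & ~nocaps;
}

int main(void)
{
        /* A NIC with no offloads at all, as in Oleksandr's report: */
        features_t caps = setup_caps(0, F_GSO /* forced by tcp_init_sock() */, 0);

        printf("GSO:          %s\n", (caps & F_GSO) ? "on" : "off");
        printf("software GSO: %s\n", (caps & F_GSO_SOFTWARE) ? "on" : "off");
        return 0;
}

On a featureless device this prints both GSO bits on, which is exactly
why SG/TSO-less configurations stop falling back to one-MSS skbs (and
one pacing timer per MSS).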
From 74d4a8f8d378ddbe7caf3331804c3d5a276a9b1a Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Mon, 19 Feb 2018 11:56:48 -0800
Subject: [PATCH 2/6] tcp: remove sk_can_gso() use

After the previous commit, sk_can_gso() is always true.

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 net/ipv4/tcp.c       | 34 ++++++++--------------------
 net/ipv4/tcp_input.c |  3 ---
 2 files changed, 8 insertions(+), 29 deletions(-)

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4b46a2ae46e3..6f35c12af85a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -898,7 +898,7 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
         struct tcp_sock *tp = tcp_sk(sk);
         u32 new_size_goal, size_goal;
 
-        if (!large_allowed || !sk_can_gso(sk))
+        if (!large_allowed)
                 return mss_now;
 
         /* Note : tcp_tso_autosize() will eventually split this later */
@@ -1103,27 +1103,11 @@ static int linear_payload_sz(bool first_skb)
         return 0;
 }
 
-static int select_size(const struct sock *sk, bool sg, bool first_skb, bool zc)
+static int select_size(bool first_skb, bool zc)
 {
-        const struct tcp_sock *tp = tcp_sk(sk);
-        int tmp = tp->mss_cache;
-
-        if (sg) {
-                if (zc)
-                        return 0;
-
-                if (sk_can_gso(sk)) {
-                        tmp = linear_payload_sz(first_skb);
-                } else {
-                        int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
-
-                        if (tmp >= pgbreak &&
-                            tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
-                                tmp = pgbreak;
-                }
-        }
-
-        return tmp;
+        if (zc)
+                return 0;
+        return linear_payload_sz(first_skb);
 }
 
 void tcp_free_fastopen_req(struct tcp_sock *tp)
@@ -1188,7 +1172,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
         int flags, err, copied = 0;
         int mss_now = 0, size_goal, copied_syn = 0;
         bool process_backlog = false;
-        bool sg, zc = false;
+        bool zc = false;
         long timeo;
 
         flags = msg->msg_flags;
@@ -1269,8 +1253,6 @@ restart:
         if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                 goto do_error;
 
-        sg = !!(sk->sk_route_caps & NETIF_F_SG);
-
         while (msg_data_left(msg)) {
                 int copy = 0;
                 int max = size_goal;
@@ -1298,7 +1280,7 @@ new_segment:
                                 goto restart;
                         }
                         first_skb = tcp_rtx_and_write_queues_empty(sk);
-                        linear = select_size(sk, sg, first_skb, zc);
+                        linear = select_size(first_skb, zc);
                         skb = sk_stream_alloc_skb(sk, linear, sk->sk_allocation,
                                                   first_skb);
                         if (!skb)
@@ -1344,7 +1326,7 @@ new_segment:
 
                         if (!skb_can_coalesce(skb, i, pfrag->page,
                                               pfrag->offset)) {
-                                if (i >= sysctl_max_skb_frags || !sg) {
+                                if (i >= sysctl_max_skb_frags) {
                                         tcp_mark_push(tp, skb);
                                         goto new_segment;
                                 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a6b48f6253e3..06b9c4765f42 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1358,9 +1358,6 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
         int len;
         int in_sack;
 
-        if (!sk_can_gso(sk))
-                goto fallback;
-
         /* Normally R but no L won't result in plain S */
         if (!dup_sack &&
             (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
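After this cleanup, select_size() has only two cases left. A standalone
sketch of the remaining decision — linear_payload_sz() is stubbed with
an arbitrary value here; in the kernel it reserves a small linear area
for the first skb only:

#include <stdbool.h>
#include <stdio.h>

/* Stub: the kernel's linear_payload_sz() returns a small linear-area
 * size for the first skb and 0 otherwise; 128 is an arbitrary stand-in. */
static int linear_payload_sz(bool first_skb)
{
        return first_skb ? 128 : 0;
}

/* Mirrors the simplified select_size(): zerocopy skbs carry no linear
 * payload at all; everything else defers to linear_payload_sz(). */
static int select_size(bool first_skb, bool zc)
{
        if (zc)
                return 0;
        return linear_payload_sz(first_skb);
}

int main(void)
{
        printf("zerocopy:         %d\n", select_size(true, true));
        printf("first skb, copy:  %d\n", select_size(true, false));
        printf("later skbs, copy: %d\n", select_size(false, false));
        return 0;
}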
From dead7cdb0daec58490891e59f4fae0c5c76fa5f3 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Mon, 19 Feb 2018 11:56:49 -0800
Subject: [PATCH 3/6] tcp: remove sk_check_csum_caps()

Since TCP relies on GSO, we do not need this helper anymore.

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 include/net/sock.h |  9 ---------
 net/ipv4/tcp.c     | 11 +++--------
 2 files changed, 3 insertions(+), 17 deletions(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index f0f576ff5603..b9624581d639 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1863,15 +1863,6 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
         sk->sk_route_caps &= ~flags;
 }
 
-static inline bool sk_check_csum_caps(struct sock *sk)
-{
-        return (sk->sk_route_caps & NETIF_F_HW_CSUM) ||
-               (sk->sk_family == PF_INET &&
-                (sk->sk_route_caps & NETIF_F_IP_CSUM)) ||
-               (sk->sk_family == PF_INET6 &&
-                (sk->sk_route_caps & NETIF_F_IPV6_CSUM));
-}
-
 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
                                            struct iov_iter *from, char *to,
                                            int copy, int offset)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6f35c12af85a..7c4140271887 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1063,8 +1063,7 @@ EXPORT_SYMBOL_GPL(do_tcp_sendpages);
 int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
                         size_t size, int flags)
 {
-        if (!(sk->sk_route_caps & NETIF_F_SG) ||
-            !sk_check_csum_caps(sk))
+        if (!(sk->sk_route_caps & NETIF_F_SG))
                 return sock_no_sendpage_locked(sk, page, offset, size, flags);
 
         tcp_rate_check_app_limited(sk);  /* is sending application-limited? */
@@ -1190,7 +1189,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
                         goto out_err;
                 }
 
-                zc = sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG;
+                zc = sk->sk_route_caps & NETIF_F_SG;
                 if (!zc)
                         uarg->zerocopy = 0;
         }
@@ -1287,11 +1286,7 @@ new_segment:
                                 goto wait_for_memory;
 
                         process_backlog = true;
-                        /*
-                         * Check whether we can use HW checksum.
-                         */
-                        if (sk_check_csum_caps(sk))
-                                skb->ip_summed = CHECKSUM_PARTIAL;
+                        skb->ip_summed = CHECKSUM_PARTIAL;
 
                         skb_entail(sk, skb);
                         copy = size_goal;

From 65ec60973af97dab72094490b4d2e9b8e169456c Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Mon, 19 Feb 2018 11:56:50 -0800
Subject: [PATCH 4/6] tcp: tcp_sendmsg() only deals with CHECKSUM_PARTIAL

We no longer have skbs with skb->ip_summed == CHECKSUM_NONE in TCP
write queues. We can remove dead code in tcp_sendmsg().

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 net/ipv4/tcp.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7c4140271887..a33539798bf6 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1254,14 +1254,10 @@ restart:
 
         while (msg_data_left(msg)) {
                 int copy = 0;
-                int max = size_goal;
 
                 skb = tcp_write_queue_tail(sk);
-                if (skb) {
-                        if (skb->ip_summed == CHECKSUM_NONE)
-                                max = mss_now;
-                        copy = max - skb->len;
-                }
+                if (skb)
+                        copy = size_goal - skb->len;
 
                 if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
                         bool first_skb;
@@ -1290,7 +1286,6 @@ new_segment:
 
                         skb_entail(sk, skb);
                         copy = size_goal;
-                        max = size_goal;
 
                         /* All packets are restored as if they have
                          * already been sent. skb_mstamp isn't set to
@@ -1374,7 +1369,7 @@ new_segment:
                         goto out;
                 }
 
-                if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
+                if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair))
                         continue;
 
                 if (forced_push(tp)) {
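With the CHECKSUM_NONE tail case gone, the fill limit for the tail skb
is always size_goal. A sketch of the simplified sizing, where a
non-positive result stands for the `copy <= 0` new-segment path above
(the ~64KB goal below is an illustrative value, not a kernel constant):

#include <stdio.h>

/* Mirrors the simplified tail-skb sizing in tcp_sendmsg_locked(): the
 * tail may always be filled up to size_goal. Before this patch, a
 * CHECKSUM_NONE tail capped the limit at one MSS (the removed 'max'). */
static int bytes_to_append(int tail_len, int size_goal)
{
        return size_goal - tail_len;    /* <= 0 => allocate a new segment */
}

int main(void)
{
        int size_goal = 65160;          /* illustrative GSO goal: 45 * 1448 */

        printf("fresh skb: %d bytes fit\n", bytes_to_append(0, size_goal));
        printf("full skb : %d bytes fit\n", bytes_to_append(size_goal, size_goal));
        return 0;
}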
From 4a64fd6ccf127973d1e2b2fc2f8024e550130617 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Mon, 19 Feb 2018 11:56:51 -0800
Subject: [PATCH 5/6] tcp: remove dead code from tcp_set_skb_tso_segs()

We no longer have skbs with skb->ip_summed == CHECKSUM_NONE in TCP
write queues.

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 net/ipv4/tcp_output.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b2bca373f8be..0196923aec42 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1206,7 +1206,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 /* Initialize TSO segments for a packet. */
 static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
 {
-        if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
+        if (skb->len <= mss_now) {
                 /* Avoid the costly divide in the normal
                  * non-TSO case.
                  */
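The surviving fast path in tcp_set_skb_tso_segs() is the single-MSS
case that skips the divide; only the CHECKSUM_NONE clause is gone. The
segment arithmetic, extracted into a standalone sketch (the real
function stores the result via skb_shinfo(), omitted here):

#include <stdio.h>

/* Segment accounting as in tcp_set_skb_tso_segs(): a payload of at most
 * one MSS is a single segment (no divide); larger skbs round up. */
static unsigned int tso_segs(unsigned int len, unsigned int mss_now)
{
        if (len <= mss_now)
                return 1;                       /* avoid the costly divide */
        return (len + mss_now - 1) / mss_now;   /* DIV_ROUND_UP(len, mss) */
}

int main(void)
{
        printf("1448 bytes @ mss 1448:  %u seg(s)\n", tso_segs(1448, 1448));
        printf("65536 bytes @ mss 1448: %u seg(s)\n", tso_segs(65536, 1448));
        return 0;
}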
From 98be9b12096fb46773b4a509d3822fd17c82218e Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Mon, 19 Feb 2018 11:56:52 -0800
Subject: [PATCH 6/6] tcp: remove dead code after CHECKSUM_PARTIAL adoption

Since all skbs in write/rtx queues have CHECKSUM_PARTIAL, we can
remove dead code.

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 net/ipv4/tcp_ipv4.c   | 13 +++----------
 net/ipv4/tcp_output.c | 38 +++++---------------------------------
 2 files changed, 8 insertions(+), 43 deletions(-)

diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index f3e52bc98980..2c6aec2643e8 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -561,16 +561,9 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 {
         struct tcphdr *th = tcp_hdr(skb);
 
-        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
-                skb->csum_start = skb_transport_header(skb) - skb->head;
-                skb->csum_offset = offsetof(struct tcphdr, check);
-        } else {
-                th->check = tcp_v4_check(skb->len, saddr, daddr,
-                                         csum_partial(th,
-                                                      th->doff << 2,
-                                                      skb->csum));
-        }
+        th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
+        skb->csum_start = skb_transport_header(skb) - skb->head;
+        skb->csum_offset = offsetof(struct tcphdr, check);
 }
 
 /* This routine computes an IPv4 TCP checksum. */
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0196923aec42..8795d76f987c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1335,21 +1335,9 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
         TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
         tcp_skb_fragment_eor(skb, buff);
 
-        if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
-                /* Copy and checksum data tail into the new buffer. */
-                buff->csum = csum_partial_copy_nocheck(skb->data + len,
-                                                       skb_put(buff, nsize),
-                                                       nsize, 0);
+        skb_split(skb, buff, len);
 
-                skb_trim(skb, len);
-
-                skb->csum = csum_block_sub(skb->csum, buff->csum, len);
-        } else {
-                skb->ip_summed = CHECKSUM_PARTIAL;
-                skb_split(skb, buff, len);
-        }
-
-        buff->ip_summed = skb->ip_summed;
+        buff->ip_summed = CHECKSUM_PARTIAL;
 
         buff->tstamp = skb->tstamp;
         tcp_fragment_tstamp(skb, buff);
@@ -1901,7 +1889,7 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 
         tcp_skb_fragment_eor(skb, buff);
 
-        buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
+        buff->ip_summed = CHECKSUM_PARTIAL;
         skb_split(skb, buff, len);
         tcp_fragment_tstamp(skb, buff);
 
@@ -2134,7 +2122,7 @@ static int tcp_mtu_probe(struct sock *sk)
         TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
         TCP_SKB_CB(nskb)->sacked = 0;
         nskb->csum = 0;
-        nskb->ip_summed = skb->ip_summed;
+        nskb->ip_summed = CHECKSUM_PARTIAL;
 
         tcp_insert_write_queue_before(nskb, skb, sk);
         tcp_highest_sack_replace(sk, skb, nskb);
@@ -2142,14 +2130,7 @@ static int tcp_mtu_probe(struct sock *sk)
         len = 0;
         tcp_for_write_queue_from_safe(skb, next, sk) {
                 copy = min_t(int, skb->len, probe_size - len);
-                if (nskb->ip_summed) {
-                        skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
-                } else {
-                        __wsum csum = skb_copy_and_csum_bits(skb, 0,
-                                                             skb_put(nskb, copy),
-                                                             copy, 0);
-                        nskb->csum = csum_block_add(nskb->csum, csum, len);
-                }
+                skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
 
                 if (skb->len <= copy) {
                         /* We've eaten all the data from this skb.
@@ -2166,9 +2147,6 @@ static int tcp_mtu_probe(struct sock *sk)
                                                    ~(TCPHDR_FIN|TCPHDR_PSH);
                         if (!skb_shinfo(skb)->nr_frags) {
                                 skb_pull(skb, copy);
-                                if (skb->ip_summed != CHECKSUM_PARTIAL)
-                                        skb->csum = csum_partial(skb->data,
-                                                                 skb->len, 0);
                         } else {
                                 __pskb_trim_head(skb, copy);
                                 tcp_set_skb_tso_segs(skb, mss_now);
@@ -2746,12 +2724,6 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
         }
 
         tcp_highest_sack_replace(sk, next_skb, skb);
 
-        if (next_skb->ip_summed == CHECKSUM_PARTIAL)
-                skb->ip_summed = CHECKSUM_PARTIAL;
-
-        if (skb->ip_summed != CHECKSUM_PARTIAL)
-                skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
-
         /* Update sequence range on original skb. */
         TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
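For reference, what the now-unconditional CHECKSUM_PARTIAL branch in
__tcp_v4_send_check() seeds: th->check receives the folded
one's-complement sum of the IPv4 pseudo header (the complement of
tcp_v4_check(skb->len, saddr, daddr, 0)), and csum_start/csum_offset
tell the device or the software GSO path where to finish the job. A
standalone sketch of that seed, assuming host-order inputs for
simplicity (the kernel sums network-order words):

#include <stdint.h>
#include <stdio.h>

static const uint32_t PROTO_TCP = 6;    /* IPPROTO_TCP */

/* Folded 16-bit one's-complement sum of the IPv4 pseudo header. This is
 * the value th->check is seeded with under CHECKSUM_PARTIAL; hardware
 * (or software GSO) continues the sum from csum_start over the TCP
 * header and payload and writes the final complement at csum_offset. */
static uint16_t tcp_pseudo_seed(uint32_t saddr, uint32_t daddr, uint32_t tcp_len)
{
        uint64_t sum = 0;

        sum += (saddr >> 16) + (saddr & 0xffff);
        sum += (daddr >> 16) + (daddr & 0xffff);
        sum += PROTO_TCP;
        sum += tcp_len;                 /* TCP header + payload, in bytes */

        while (sum >> 16)               /* fold carries back into 16 bits */
                sum = (sum & 0xffff) + (sum >> 16);

        return (uint16_t)sum;
}

int main(void)
{
        /* 10.0.0.1 -> 10.0.0.2, 1500-byte TCP segment (illustrative) */
        printf("th->check seed: 0x%04x\n",
               tcp_pseudo_seed(0x0a000001, 0x0a000002, 1500));
        return 0;
}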