From dc05360fee660a9dbe59824b3f7896534210432b Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Fri, 22 Mar 2019 08:56:38 -0700
Subject: [PATCH 1/3] net: convert rps_needed and rfs_needed to new static branch api

We prefer static_branch_unlikely() over static_key_false() these days.

Signed-off-by: Eric Dumazet
Acked-by: Soheil Hassas Yeganeh
Acked-by: Willem de Bruijn
Signed-off-by: David S. Miller
---
 drivers/net/tun.c          |  2 +-
 include/linux/netdevice.h  |  4 ++--
 include/net/sock.h         |  2 +-
 net/core/dev.c             | 10 +++++-----
 net/core/net-sysfs.c       |  4 ++--
 net/core/sysctl_net_core.c |  8 ++++----
 6 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 27798aacb671..24d0220b9ba0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1042,7 +1042,7 @@ static int tun_net_close(struct net_device *dev)
 static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
 {
 #ifdef CONFIG_RPS
-	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
+	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
 		/* Select queue was not called for the skbuff, so we extract the
 		 * RPS hash and save it into the flow_table here.
 		 */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 823762291ebf..166fdc0a78b4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -194,8 +194,8 @@ struct net_device_stats {
 
 #ifdef CONFIG_RPS
 #include <linux/static_key.h>
-extern struct static_key rps_needed;
-extern struct static_key rfs_needed;
+extern struct static_key_false rps_needed;
+extern struct static_key_false rfs_needed;
 #endif
 
 struct neighbour;
diff --git a/include/net/sock.h b/include/net/sock.h
index 8de5ee258b93..fecdf639225c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -966,7 +966,7 @@ static inline void sock_rps_record_flow_hash(__u32 hash)
 static inline void sock_rps_record_flow(const struct sock *sk)
 {
 #ifdef CONFIG_RPS
-	if (static_key_false(&rfs_needed)) {
+	if (static_branch_unlikely(&rfs_needed)) {
 		/* Reading sk->sk_rxhash might incur an expensive cache line
 		 * miss.
 		 *
diff --git a/net/core/dev.c b/net/core/dev.c
index 676c9418f8e4..9ca2d3abfd1a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3982,9 +3982,9 @@ EXPORT_SYMBOL(rps_sock_flow_table);
 u32 rps_cpu_mask __read_mostly;
 EXPORT_SYMBOL(rps_cpu_mask);
 
-struct static_key rps_needed __read_mostly;
+struct static_key_false rps_needed __read_mostly;
 EXPORT_SYMBOL(rps_needed);
-struct static_key rfs_needed __read_mostly;
+struct static_key_false rfs_needed __read_mostly;
 EXPORT_SYMBOL(rfs_needed);
 
 static struct rps_dev_flow *
@@ -4510,7 +4510,7 @@ static int netif_rx_internal(struct sk_buff *skb)
 	}
 
 #ifdef CONFIG_RPS
-	if (static_key_false(&rps_needed)) {
+	if (static_branch_unlikely(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu;
 
@@ -5179,7 +5179,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
 	rcu_read_lock();
 
 #ifdef CONFIG_RPS
-	if (static_key_false(&rps_needed)) {
+	if (static_branch_unlikely(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
@@ -5227,7 +5227,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 	rcu_read_lock();
 #ifdef CONFIG_RPS
-	if (static_key_false(&rps_needed)) {
+	if (static_branch_unlikely(&rps_needed)) {
 		list_for_each_entry_safe(skb, next, head, list) {
 			struct rps_dev_flow voidflow, *rflow = &voidflow;
 			int cpu = get_rps_cpu(skb->dev, skb, &rflow);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 4ff661f6f989..851cabb90bce 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -754,9 +754,9 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 	rcu_assign_pointer(queue->rps_map, map);
 
 	if (map)
-		static_key_slow_inc(&rps_needed);
+		static_branch_inc(&rps_needed);
 	if (old_map)
-		static_key_slow_dec(&rps_needed);
+		static_branch_dec(&rps_needed);
 
 	mutex_unlock(&rps_map_mutex);
 
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 84bf2861f45f..1a2685694abd 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -95,12 +95,12 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
 		if (sock_table != orig_sock_table) {
 			rcu_assign_pointer(rps_sock_flow_table, sock_table);
 			if (sock_table) {
-				static_key_slow_inc(&rps_needed);
-				static_key_slow_inc(&rfs_needed);
+				static_branch_inc(&rps_needed);
+				static_branch_inc(&rfs_needed);
 			}
 			if (orig_sock_table) {
-				static_key_slow_dec(&rps_needed);
-				static_key_slow_dec(&rfs_needed);
+				static_branch_dec(&rps_needed);
+				static_branch_dec(&rfs_needed);
 				synchronize_rcu();
 				vfree(orig_sock_table);
 			}
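
For readers less familiar with the jump-label API this series converts to,
the usage pattern looks roughly like the sketch below. This is only an
illustrative fragment with a hypothetical my_feature_needed key, not code
from the patches above; the real keys keep their extern declarations and
__read_mostly placement exactly as shown in the diff.

#include <linux/jump_label.h>

/* Hypothetical key, defined in the "false" (disabled) state. */
static DEFINE_STATIC_KEY_FALSE(my_feature_needed);

static void my_hot_path(void)
{
	/* Compiled as a runtime-patched no-op branch instead of a
	 * load-and-test, so the disabled case costs almost nothing.
	 */
	if (static_branch_unlikely(&my_feature_needed)) {
		/* slow path, only reached once the key has been enabled */
	}
}

static void my_feature_enable(void)
{
	static_branch_inc(&my_feature_needed);	/* replaces static_key_slow_inc() */
}

static void my_feature_disable(void)
{
	static_branch_dec(&my_feature_needed);	/* replaces static_key_slow_dec() */
}
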

From 472c2e07eef045145bc1493cc94a01c87140780a Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Fri, 22 Mar 2019 08:56:39 -0700
Subject: [PATCH 2/3] tcp: add one skb cache for tx

On hosts with a lot of cores, RPC workloads suffer from heavy
contention on slab spinlocks.

    20.69%  [kernel]  [k] queued_spin_lock_slowpath
     5.64%  [kernel]  [k] _raw_spin_lock
     3.83%  [kernel]  [k] syscall_return_via_sysret
     3.48%  [kernel]  [k] __entry_text_start
     1.76%  [kernel]  [k] __netif_receive_skb_core
     1.64%  [kernel]  [k] __fget

For each sendmsg(), we allocate one skb and free it when the ACK
packet comes. In many cases, ACK packets are handled by other cpus,
and this unfortunately incurs heavy costs for the slab layer.

This patch uses an extra pointer in the socket structure, so that we
try to reuse the same skb and avoid these expensive costs.

We cache at most one skb per socket, so this should be safe as far as
memory pressure is concerned.

Signed-off-by: Eric Dumazet
Acked-by: Soheil Hassas Yeganeh
Acked-by: Willem de Bruijn
Signed-off-by: David S. Miller
---
 include/net/sock.h |  5 +++++
 net/ipv4/tcp.c     | 50 +++++++++++++++++++++---------------------------
 2 files changed, 28 insertions(+), 27 deletions(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index fecdf639225c..314c47a8f5d1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -414,6 +414,7 @@ struct sock {
 		struct sk_buff	*sk_send_head;
 		struct rb_root	tcp_rtx_queue;
 	};
+	struct sk_buff	*sk_tx_skb_cache;
 	struct sk_buff_head	sk_write_queue;
 	__s32			sk_peek_off;
 	int			sk_write_pending;
@@ -1463,6 +1464,10 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
 
 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
 {
+	if (!sk->sk_tx_skb_cache) {
+		sk->sk_tx_skb_cache = skb;
+		return;
+	}
 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
 	sk->sk_wmem_queued -= skb->truesize;
 	sk_mem_uncharge(sk, skb->truesize);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6baa6dc1b13b..f0b5a5999145 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -865,6 +865,21 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
 {
 	struct sk_buff *skb;
 
+	skb = sk->sk_tx_skb_cache;
+	if (skb && !size) {
+		const struct sk_buff_fclones *fclones;
+
+		fclones = container_of(skb, struct sk_buff_fclones, skb1);
+		if (refcount_read(&fclones->fclone_ref) == 1) {
+			sk->sk_wmem_queued -= skb->truesize;
+			sk_mem_uncharge(sk, skb->truesize);
+			skb->truesize -= skb->data_len;
+			sk->sk_tx_skb_cache = NULL;
+			pskb_trim(skb, 0);
+			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
+			return skb;
+		}
+	}
 	/* The TCP header must be at least 32-bit aligned.  */
 	size = ALIGN(size, 4);
 
@@ -1098,30 +1113,6 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
 }
 EXPORT_SYMBOL(tcp_sendpage);
 
-/* Do not bother using a page frag for very small frames.
- * But use this heuristic only for the first skb in write queue.
- *
- * Having no payload in skb->head allows better SACK shifting
- * in tcp_shift_skb_data(), reducing sack/rack overhead, because
- * write queue has less skbs.
- * Each skb can hold up to MAX_SKB_FRAGS * 32Kbytes, or ~0.5 MB.
- * This also speeds up tso_fragment(), since it wont fallback
- * to tcp_fragment().
- */
-static int linear_payload_sz(bool first_skb)
-{
-	if (first_skb)
-		return SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
-	return 0;
-}
-
-static int select_size(bool first_skb, bool zc)
-{
-	if (zc)
-		return 0;
-	return linear_payload_sz(first_skb);
-}
-
 void tcp_free_fastopen_req(struct tcp_sock *tp)
 {
 	if (tp->fastopen_req) {
@@ -1272,7 +1263,6 @@ restart:
 
 		if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
 			bool first_skb;
-			int linear;
 
 new_segment:
 			if (!sk_stream_memory_free(sk))
@@ -1283,8 +1273,7 @@ new_segment:
 				goto restart;
 			}
 			first_skb = tcp_rtx_and_write_queues_empty(sk);
-			linear = select_size(first_skb, zc);
-			skb = sk_stream_alloc_skb(sk, linear, sk->sk_allocation,
+			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
 						  first_skb);
 			if (!skb)
 				goto wait_for_memory;
@@ -2552,6 +2541,13 @@ void tcp_write_queue_purge(struct sock *sk)
 		sk_wmem_free_skb(sk, skb);
 	}
 	tcp_rtx_queue_purge(sk);
+	skb = sk->sk_tx_skb_cache;
+	if (skb) {
+		sk->sk_wmem_queued -= skb->truesize;
+		sk_mem_uncharge(sk, skb->truesize);
+		__kfree_skb(skb);
+		sk->sk_tx_skb_cache = NULL;
+	}
 	INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
 	sk_mem_reclaim(sk);
 	tcp_clear_all_retrans_hints(tcp_sk(sk));
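
To make the mechanism of this patch easier to follow, here is a standalone,
much-simplified userspace analogue of the one-slot tx cache: the free path
parks the buffer, and the next allocation reuses it instead of going back to
the allocator. All names here (struct buf, struct sock_ctx, ctx_free_buf,
ctx_alloc_buf) are invented for illustration and do not exist in the kernel;
the real code must also check the skb clone refcount (fclone_ref) and fix up
memory accounting, which this sketch omits.

#include <stdlib.h>

struct buf {
	size_t size;
	char data[2048];
};

struct sock_ctx {
	struct buf *tx_cache;	/* at most one cached buffer, like sk_tx_skb_cache */
};

/* Free path: park the buffer in the one-slot cache if the slot is empty. */
static void ctx_free_buf(struct sock_ctx *ctx, struct buf *b)
{
	if (!ctx->tx_cache) {
		ctx->tx_cache = b;
		return;
	}
	free(b);
}

/* Alloc path: reuse the cached buffer when possible, avoiding the allocator. */
static struct buf *ctx_alloc_buf(struct sock_ctx *ctx)
{
	struct buf *b = ctx->tx_cache;

	if (b) {
		ctx->tx_cache = NULL;
		b->size = 0;	/* roughly what pskb_trim(skb, 0) does above */
		return b;
	}
	return calloc(1, sizeof(*b));
}
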

From 8b27dae5a2e89a61c46c6dbc76c040c0e6d0ed4c Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Fri, 22 Mar 2019 08:56:40 -0700
Subject: [PATCH 3/3] tcp: add one skb cache for rx

Often, recvmsg() system calls and BH handling for a particular
TCP socket are done on different cpus.

This means the incoming skb had to be allocated on one cpu,
but freed on another.

This incurs high spinlock contention in the slab layer for small RPCs,
but also a high number of cache line ping pongs for larger packets.

A full size GRO packet might use 45 page fragments, meaning
that up to 45 put_page() calls can be involved.

Moreover, performing the __kfree_skb() in the recvmsg() context
adds latency for user applications, and increases the probability
of trapping them in backlog processing, since the BH handler might
find the socket owned by the user.

This patch, combined with the prior one, increases RPC performance
by about 10 % on servers with a large number of cores.

(A tcp_rr workload with 10,000 flows and 112 threads reaches 9 Mpps
instead of 8 Mpps.)

This also increases single bulk flow performance on 40Gbit+ links,
since in this case there are often two cpus working in tandem:

- CPU handling the NIC rx interrupts, feeding the receive queue,
  and (after this patch) freeing the skbs that were consumed.

- CPU in recvmsg() system call, essentially 100 % busy copying out
  data to user space.

Having at most one skb in a per-socket cache has very little risk
of memory exhaustion, and since it is protected by the socket lock,
its management is essentially free.

Note that if rps/rfs is used, we do not enable this feature, because
there is a high chance that the same cpu is handling both the
recvmsg() system call and the TCP rx path, but that another cpu
did the skb allocations in the device driver right before the
RPS/RFS logic.

To properly handle this case, it seems we would need to record
on which cpu the skb was allocated, and use a different channel
to give skbs back to this cpu.

Signed-off-by: Eric Dumazet
Acked-by: Soheil Hassas Yeganeh
Acked-by: Willem de Bruijn
Signed-off-by: David S. Miller
---
 include/net/sock.h  | 10 ++++++++++
 net/ipv4/af_inet.c  |  4 ++++
 net/ipv4/tcp.c      |  4 ++++
 net/ipv4/tcp_ipv4.c | 11 +++++++++--
 net/ipv6/tcp_ipv6.c | 12 +++++++++---
 5 files changed, 36 insertions(+), 5 deletions(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index 314c47a8f5d1..577d91fb5626 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -368,6 +368,7 @@ struct sock {
 	atomic_t		sk_drops;
 	int			sk_rcvlowat;
 	struct sk_buff_head	sk_error_queue;
+	struct sk_buff		*sk_rx_skb_cache;
 	struct sk_buff_head	sk_receive_queue;
 	/*
 	 * The backlog queue is special, it is always used with
@@ -2438,6 +2439,15 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
 {
 	__skb_unlink(skb, &sk->sk_receive_queue);
+	if (
+#ifdef CONFIG_RPS
+	    !static_branch_unlikely(&rps_needed) &&
+#endif
+	    !sk->sk_rx_skb_cache) {
+		sk->sk_rx_skb_cache = skb;
+		skb_orphan(skb);
+		return;
+	}
 	__kfree_skb(skb);
 }
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index eab3ebde981e..7f3a984ad618 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -136,6 +136,10 @@ void inet_sock_destruct(struct sock *sk)
 	struct inet_sock *inet = inet_sk(sk);
 
 	__skb_queue_purge(&sk->sk_receive_queue);
+	if (sk->sk_rx_skb_cache) {
+		__kfree_skb(sk->sk_rx_skb_cache);
+		sk->sk_rx_skb_cache = NULL;
+	}
 	__skb_queue_purge(&sk->sk_error_queue);
 
 	sk_mem_reclaim(sk);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f0b5a5999145..29b94edf05f9 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2583,6 +2583,10 @@ int tcp_disconnect(struct sock *sk, int flags)
 
 	tcp_clear_xmit_timers(sk);
 	__skb_queue_purge(&sk->sk_receive_queue);
+	if (sk->sk_rx_skb_cache) {
+		__kfree_skb(sk->sk_rx_skb_cache);
+		sk->sk_rx_skb_cache = NULL;
+	}
 	tp->copied_seq = tp->rcv_nxt;
 	tp->urg_data = 0;
 	tcp_write_queue_purge(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 277d71239d75..3979939804b7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1774,6 +1774,7 @@ static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
 int tcp_v4_rcv(struct sk_buff *skb)
 {
 	struct net *net = dev_net(skb->dev);
+	struct sk_buff *skb_to_free;
 	int sdif = inet_sdif(skb);
 	const struct iphdr *iph;
 	const struct tcphdr *th;
@@ -1905,11 +1906,17 @@ process:
 	tcp_segs_in(tcp_sk(sk), skb);
 	ret = 0;
 	if (!sock_owned_by_user(sk)) {
+		skb_to_free = sk->sk_rx_skb_cache;
+		sk->sk_rx_skb_cache = NULL;
 		ret = tcp_v4_do_rcv(sk, skb);
-	} else if (tcp_add_backlog(sk, skb)) {
-		goto discard_and_relse;
+	} else {
+		if (tcp_add_backlog(sk, skb))
+			goto discard_and_relse;
+		skb_to_free = NULL;
 	}
 	bh_unlock_sock(sk);
+	if (skb_to_free)
+		__kfree_skb(skb_to_free);
 
 put_and_return:
 	if (refcounted)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 983ad7a75102..77d723bbe050 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1436,6 +1436,7 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
 
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
+	struct sk_buff *skb_to_free;
 	int sdif = inet6_sdif(skb);
 	const struct tcphdr *th;
 	const struct ipv6hdr *hdr;
@@ -1562,12 +1563,17 @@ process:
 	tcp_segs_in(tcp_sk(sk), skb);
 	ret = 0;
 	if (!sock_owned_by_user(sk)) {
+		skb_to_free = sk->sk_rx_skb_cache;
+		sk->sk_rx_skb_cache = NULL;
 		ret = tcp_v6_do_rcv(sk, skb);
-	} else if (tcp_add_backlog(sk, skb)) {
-		goto discard_and_relse;
+	} else {
+		if (tcp_add_backlog(sk, skb))
+			goto discard_and_relse;
+		skb_to_free = NULL;
 	}
 	bh_unlock_sock(sk);
-
+	if (skb_to_free)
+		__kfree_skb(skb_to_free);
 put_and_return:
 	if (refcounted)
 		sock_put(sk);
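
To illustrate the rx-side idea outside the kernel, here is a standalone,
much-simplified sketch of the "park on the consumer side, free on the
producer side after dropping the lock" pattern used above. All names
(struct rx_ctx, rx_consume, rx_producer_work) are invented for
illustration; the real code additionally bypasses the cache when RPS/RFS
is enabled and is serialized by the socket lock rather than a mutex.

#include <pthread.h>
#include <stdlib.h>

struct rx_ctx {
	pthread_mutex_t lock;
	void *rx_cache;		/* at most one parked buffer, like sk_rx_skb_cache */
};

/* Consumer side (recvmsg() analogue): park the buffer instead of freeing it. */
static void rx_consume(struct rx_ctx *ctx, void *buf)
{
	pthread_mutex_lock(&ctx->lock);
	if (!ctx->rx_cache)
		ctx->rx_cache = buf;
	else
		free(buf);	/* slot already taken, free immediately */
	pthread_mutex_unlock(&ctx->lock);
}

/* Producer side (tcp_v4_rcv() analogue): take the parked buffer under the
 * lock, but perform the actual free only after the lock is released, so the
 * expensive free runs on the cpu that also allocates new buffers.
 */
static void rx_producer_work(struct rx_ctx *ctx)
{
	void *to_free;

	pthread_mutex_lock(&ctx->lock);
	to_free = ctx->rx_cache;
	ctx->rx_cache = NULL;
	/* ... deliver the newly received buffer to the consumer here ... */
	pthread_mutex_unlock(&ctx->lock);

	if (to_free)
		free(to_free);
}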