/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2007 - 2013 Nicira, Inc.
 */
# ifndef FLOW_TABLE_H
# define FLOW_TABLE_H 1
# include <linux/kernel.h>
# include <linux/netlink.h>
# include <linux/openvswitch.h>
# include <linux/spinlock.h>
# include <linux/types.h>
# include <linux/rcupdate.h>
# include <linux/if_ether.h>
# include <linux/in6.h>
# include <linux/jiffies.h>
# include <linux/time.h>
# include <net/inet_ecn.h>
# include <net/ip_tunnels.h>
# include "flow.h"
2019-11-01 17:23:45 +03:00
struct mask_cache_entry {
u32 skb_hash ;
u32 mask_index ;
} ;
2020-07-31 15:21:34 +03:00
/*
 * RCU-managed, per-CPU cache of mask_cache_entry slots used to speed up
 * flow-mask selection on lookup.
 */
struct mask_cache {
	struct rcu_head rcu;
	u32 cache_size;		/* Number of entries; must be a power of 2. */
	struct mask_cache_entry __percpu *mask_cache;
};
/* (index, usage counter) pair for one mask; used when ranking masks. */
struct mask_count {
	int index;
	u64 counter;
};
/*
 * Per-CPU usage counters for the masks in a mask_array, guarded by a
 * u64_stats_sync for consistent 64-bit reads on 32-bit hosts.
 */
struct mask_array_stats {
	struct u64_stats_sync syncp;
	u64 usage_cntrs[];	/* One counter per mask slot. */
};
struct mask_array {
struct rcu_head rcu ;
int count , max ;
2020-10-17 21:24:51 +03:00
struct mask_array_stats __percpu * masks_usage_stats ;
2020-07-15 15:09:28 +03:00
u64 * masks_usage_zero_cntr ;
2023-10-14 09:34:53 +03:00
struct sw_flow_mask __rcu * masks [ ] __counted_by ( max ) ;
2019-11-01 17:23:46 +03:00
} ;
2013-10-04 11:14:23 +04:00
struct table_instance {
2019-03-12 09:31:02 +03:00
struct hlist_head * buckets ;
2013-10-04 11:14:23 +04:00
unsigned int n_buckets ;
2013-10-04 05:16:47 +04:00
struct rcu_head rcu ;
int node_ver ;
u32 hash_seed ;
} ;
2013-10-04 11:14:23 +04:00
/*
 * Top-level flow table: two RCU-swapped table instances (flow-key keyed
 * and UFID keyed), the mask array, and the per-CPU mask cache.
 */
struct flow_table {
	struct table_instance __rcu *ti;	/* Keyed by flow key. */
	struct table_instance __rcu *ufid_ti;	/* Keyed by unique flow ID. */
	struct mask_cache __rcu *mask_cache;
	struct mask_array __rcu *mask_array;
	unsigned long last_rehash;		/* jiffies of last rehash. */
	unsigned int count;			/* Flows in 'ti'. */
	unsigned int ufid_count;		/* Flows in 'ufid_ti'. */
};
/*
 * Historical note (from commit "openvswitch: Per NUMA node flow stats"):
 *
 * Keep kernel flow stats for each NUMA node rather than each (logical)
 * CPU.  This avoids using the per-CPU allocator and removes most of the
 * kernel-side OVS locking overhead otherwise at the top of perf reports,
 * and allows OVS to scale better with a higher number of threads.
 *
 * With 9 handlers and 4 revalidators, the netperf TCP_CRR flow setup
 * rate doubles on a server with two hyper-threaded physical CPUs (16
 * logical cores each) compared to the previous OVS master.  Tested with
 * a non-trivial flow table with a TCP port match rule forcing all new
 * connections with unique port numbers to OVS userspace.  The IP
 * addresses are still wildcarded, so the kernel flows are not considered
 * exact-match 5-tuple flows.  This type of flow can be expected to
 * appear in large numbers as the result of more effective wildcarding
 * made possible by improvements in the OVS userspace flow classifier.
 *
 * Perf results for this test (master):
 *
 * Events: 305K cycles
 * +  8.43%  ovs-vswitchd  [kernel.kallsyms]   [k] mutex_spin_on_owner
 * +  5.64%  ovs-vswitchd  [kernel.kallsyms]   [k] __ticket_spin_lock
 * +  4.75%  ovs-vswitchd  ovs-vswitchd        [.] find_match_wc
 * +  3.32%  ovs-vswitchd  libpthread-2.15.so  [.] pthread_mutex_lock
 * +  2.61%  ovs-vswitchd  [kernel.kallsyms]   [k] pcpu_alloc_area
 * +  2.19%  ovs-vswitchd  ovs-vswitchd        [.] flow_hash_in_minimask_range
 * +  2.03%  swapper       [kernel.kallsyms]   [k] intel_idle
 * +  1.84%  ovs-vswitchd  libpthread-2.15.so  [.] pthread_mutex_unlock
 * +  1.64%  ovs-vswitchd  ovs-vswitchd        [.] classifier_lookup
 * +  1.58%  ovs-vswitchd  libc-2.15.so        [.] 0x7f4e6
 * +  1.07%  ovs-vswitchd  [kernel.kallsyms]   [k] memset
 * +  1.03%  netperf       [kernel.kallsyms]   [k] __ticket_spin_lock
 * +  0.92%  swapper       [kernel.kallsyms]   [k] __ticket_spin_lock
 * ...
 *
 * And after this patch:
 *
 * Events: 356K cycles
 * +  6.85%  ovs-vswitchd  ovs-vswitchd        [.] find_match_wc
 * +  4.63%  ovs-vswitchd  libpthread-2.15.so  [.] pthread_mutex_lock
 * +  3.06%  ovs-vswitchd  [kernel.kallsyms]   [k] __ticket_spin_lock
 * +  2.81%  ovs-vswitchd  ovs-vswitchd        [.] flow_hash_in_minimask_range
 * +  2.51%  ovs-vswitchd  libpthread-2.15.so  [.] pthread_mutex_unlock
 * +  2.27%  ovs-vswitchd  ovs-vswitchd        [.] classifier_lookup
 * +  1.84%  ovs-vswitchd  libc-2.15.so        [.] 0x15d30f
 * +  1.74%  ovs-vswitchd  [kernel.kallsyms]   [k] mutex_spin_on_owner
 * +  1.47%  swapper       [kernel.kallsyms]   [k] intel_idle
 * +  1.34%  ovs-vswitchd  ovs-vswitchd        [.] flow_hash_in_minimask
 * +  1.33%  ovs-vswitchd  ovs-vswitchd        [.] rule_actions_unref
 * +  1.16%  ovs-vswitchd  ovs-vswitchd        [.] hindex_node_with_hash
 * +  1.16%  ovs-vswitchd  ovs-vswitchd        [.] do_xlate_actions
 * +  1.09%  ovs-vswitchd  ovs-vswitchd        [.] ofproto_rule_ref
 * +  1.01%  netperf       [kernel.kallsyms]   [k] __ticket_spin_lock
 * ...
 *
 * There is a small increase in kernel spinlock overhead due to the same
 * spinlock being shared between multiple cores of the same physical CPU,
 * but that is barely visible in the netperf TCP_CRR test performance
 * (maybe ~1% performance drop, hard to tell exactly due to variance in
 * the test results), when testing for kernel module throughput (with no
 * userspace activity, handful of kernel flows).
 *
 * On flow setup, a single stats instance is allocated (for NUMA node 0).
 * As CPUs from multiple NUMA nodes start updating stats, new
 * NUMA-node-specific stats instances are allocated.  This allocation on
 * the packet processing code path is made to never block or look for
 * emergency memory pools, minimizing the allocation latency.  If the
 * allocation fails, the existing preallocated stats instance is used.
 * Also, if only CPUs from one NUMA node are updating the preallocated
 * stats instance, no additional stats instances are allocated.  This
 * eliminates the need to pre-allocate stats instances that will not be
 * used, also relieving the stats reader from the burden of reading stats
 * that are never used.
 *
 * Signed-off-by: Jarno Rajahalme <jrajahalme@nicira.com>
 * Acked-by: Pravin B Shelar <pshelar@nicira.com>
 * Signed-off-by: Jesse Gross <jesse@nicira.com>
 */
/* Shared slab cache for per-flow statistics; defined in the flow code. */
extern struct kmem_cache *flow_stats_cache;
/* Module init / teardown. */
int ovs_flow_init(void);
void ovs_flow_exit(void);

/* Flow allocation and (optionally RCU-deferred) release. */
struct sw_flow *ovs_flow_alloc(void);
void ovs_flow_free(struct sw_flow *, bool deferred);

/* Table lifecycle. */
int ovs_flow_tbl_init(struct flow_table *);
int ovs_flow_tbl_count(const struct flow_table *table);
void ovs_flow_tbl_destroy(struct flow_table *table);
int ovs_flow_tbl_flush(struct flow_table *flow_table);

/* Flow insertion / removal and mask accounting. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask);
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
int ovs_flow_tbl_num_masks(const struct flow_table *table);
u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table);
int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size);

/* Iteration: advance to the next flow, updating bucket/index cursors. */
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
				       u32 *bucket, u32 *idx);

/*
 * Lookups.  The _stats variant also reports how many masks were tried
 * (n_mask_hit) and whether the mask cache hit (n_cache_hit).
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
					  const struct sw_flow_key *,
					  u32 skb_hash,
					  u32 *n_mask_hit,
					  u32 *n_cache_hit);
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
				    const struct sw_flow_key *);
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match);
struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *,
					 const struct sw_flow_id *);
bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *);

/* Apply 'mask' to 'src', writing the masked key into 'dst'. */
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask);

/* Reorder masks by observed usage to shorten the average lookup. */
void ovs_flow_masks_rebalance(struct flow_table *table);

/* Remove every flow from both table instances. */
void table_instance_flow_flush(struct flow_table *table,
			       struct table_instance *ti,
			       struct table_instance *ufid_ti);

#endif /* flow_table.h */