net: openvswitch: optimize flow mask cache hash collision

Port the code to Linux upstream with only minor changes.

Pravin B Shelar says:
| In case of a hash collision on the mask cache, OVS does an extra flow
| lookup. The following patch avoids it.

Link: 0e6efbe271
Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Tested-by: Greg Rose <gvrose8192@gmail.com>
Signed-off-by: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored by Tonghao Zhang on 2019-11-01 22:23:48 +08:00, committed by David S. Miller
parent 1689754de6
commit a7f35e78e7
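For context on the mask cache mentioned in the quoted description: each CPU keeps a small table that remembers, per skb_hash, which flow mask matched last time. The hash is consumed in MC_HASH_SEGS segments and each segment addresses one cache slot, so different flows can collide on the same slots. Below is a simplified userspace model of that addressing, not kernel code; the MC_* values and the two cache-entry fields mirror net/openvswitch/flow_table.c at the time, while the demo function and main() are made up for illustration.

/* Simplified model of the per-CPU mask cache addressing (illustrative only). */
#include <stdint.h>
#include <stdio.h>

#define MC_HASH_SHIFT	8
#define MC_HASH_ENTRIES	(1u << MC_HASH_SHIFT)
#define MC_HASH_SEGS	((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)

struct mask_cache_entry {
	uint32_t skb_hash;	/* 0 means the slot is unused */
	uint32_t mask_index;	/* mask that matched this hash last time */
};

/* Print which cache slots a given skb_hash probes, one per hash segment. */
static void print_probed_slots(uint32_t skb_hash)
{
	uint32_t hash = skb_hash;
	unsigned int seg;

	printf("skb_hash 0x%08x probes slots:", skb_hash);
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		printf(" 0x%02x", hash & (MC_HASH_ENTRIES - 1));
		hash >>= MC_HASH_SHIFT;
	}
	printf("\n");
}

int main(void)
{
	print_probed_slots(0x12345678);	/* probes slots 0x78 0x56 0x34 0x12 */
	return 0;
}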

--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c

@@ -508,6 +508,9 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
 	return NULL;
 }
 
+/* Flow lookup does full lookup on flow table. It starts with
+ * mask from index passed in *index.
+ */
 static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				   struct table_instance *ti,
 				   struct mask_array *ma,
@@ -515,21 +518,34 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				   u32 *n_mask_hit,
 				   u32 *index)
 {
+	struct sw_flow_mask *mask;
 	struct sw_flow *flow;
 	int i;
 
+	if (*index < ma->max) {
+		mask = rcu_dereference_ovsl(ma->masks[*index]);
+		if (mask) {
+			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+			if (flow)
+				return flow;
+		}
+	}
+
 	for (i = 0; i < ma->max; i++)  {
-		struct sw_flow_mask *mask;
 
+		if (i == *index)
+			continue;
+
 		mask = rcu_dereference_ovsl(ma->masks[i]);
-		if (mask) {
-			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
-			if (flow) { /* Found */
-				*index = i;
-				return flow;
-			}
+		if (!mask)
+			continue;
+
+		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+		if (flow) { /* Found */
+			*index = i;
+			return flow;
 		}
 	}
 
 	return NULL;
 }
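The hunk above is the core of the change: flow_lookup() now tries the mask at *index first and skips that index during the full scan, so masked_flow_lookup() runs at most once per mask for a given packet. A small standalone demo of the resulting probe order, assuming four masks and a cached index of 2 (the names and numbers below are made up for illustration, this is not kernel code):

#include <stdio.h>

#define N_MASKS 4

/* Old behaviour (worst case, cached mask no longer matches): the cache-hit
 * path probed the remembered mask, then the fallback full scan probed every
 * mask from 0, including that one a second time. */
static void old_probe_order(unsigned int cached)
{
	unsigned int i;

	printf("old:");
	printf(" %u", cached);
	for (i = 0; i < N_MASKS; i++)
		printf(" %u", i);
	printf("\n");
}

/* New behaviour: the remembered mask is tried first and skipped afterwards. */
static void new_probe_order(unsigned int hint)
{
	unsigned int i;

	printf("new:");
	if (hint < N_MASKS)
		printf(" %u", hint);
	for (i = 0; i < N_MASKS; i++)
		if (i != hint)
			printf(" %u", i);
	printf("\n");
}

int main(void)
{
	old_probe_order(2);	/* old: 2 0 1 2 3 */
	new_probe_order(2);	/* new: 2 0 1 3 */
	return 0;
}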
@@ -546,58 +562,54 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 					  u32 skb_hash,
 					  u32 *n_mask_hit)
 {
-	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
-	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
-	struct mask_cache_entry *entries, *ce, *del;
+	struct mask_array *ma = rcu_dereference(tbl->mask_array);
+	struct table_instance *ti = rcu_dereference(tbl->ti);
+	struct mask_cache_entry *entries, *ce;
 	struct sw_flow *flow;
-	u32 hash = skb_hash;
+	u32 hash;
 	int seg;
 
 	*n_mask_hit = 0;
 	if (unlikely(!skb_hash)) {
-		u32 __always_unused mask_index;
+		u32 mask_index = 0;
 
 		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
 	}
 
-	del = NULL;
+	/* Pre and post recirulation flows usually have the same skb_hash
+	 * value. To avoid hash collisions, rehash the 'skb_hash' with
+	 * 'recirc_id'. */
+	if (key->recirc_id)
+		skb_hash = jhash_1word(skb_hash, key->recirc_id);
+
+	ce = NULL;
+	hash = skb_hash;
 	entries = this_cpu_ptr(tbl->mask_cache);
 
 	/* Find the cache entry 'ce' to operate on. */
 	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
-		int index;
-
-		index = hash & (MC_HASH_ENTRIES - 1);
-		ce = &entries[index];
-
-		if (ce->skb_hash == skb_hash) {
-			struct sw_flow_mask *mask;
-			struct sw_flow *flow;
-
-			mask = rcu_dereference_ovsl(ma->masks[ce->mask_index]);
-			if (mask) {
-				flow = masked_flow_lookup(ti, key, mask,
-							  n_mask_hit);
-				if (flow)  /* Found */
-					return flow;
-			}
-
-			del = ce;
-			break;
+		int index = hash & (MC_HASH_ENTRIES - 1);
+		struct mask_cache_entry *e;
+
+		e = &entries[index];
+		if (e->skb_hash == skb_hash) {
+			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
+					   &e->mask_index);
+			if (!flow)
+				e->skb_hash = 0;
+
+			return flow;
 		}
 
-		if (!del || (del->skb_hash && !ce->skb_hash) ||
-		    (rcu_dereference_ovsl(ma->masks[del->mask_index]) &&
-		     !rcu_dereference_ovsl(ma->masks[ce->mask_index]))) {
-			del = ce;
-		}
+		if (!ce || e->skb_hash < ce->skb_hash)
+			ce = e;  /* A better replacement cache candidate. */
 
 		hash >>= MC_HASH_SHIFT;
 	}
 
-	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &del->mask_index);
+	/* Cache miss, do full lookup. */
+	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
 	if (flow)
-		del->skb_hash = skb_hash;
+		ce->skb_hash = skb_hash;
 
 	return flow;
 }
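Besides the cache-entry bookkeeping changes, the hunk above also rehashes skb_hash with the recirculation id: a packet keeps its skb_hash across recirculation, so the pre- and post-recirculation flows would otherwise probe exactly the same cache slots and keep evicting each other. A kernel-style fragment of just that step (mask_cache_hash() is a made-up helper name for illustration; jhash_1word() is the real helper from <linux/jhash.h>):

#include <linux/jhash.h>

/* Illustration only: derive the hash used for mask-cache addressing. */
static u32 mask_cache_hash(u32 skb_hash, u32 recirc_id)
{
	/* recirc_id is 0 before any recirculation, so the original
	 * skb_hash is kept; recirculated packets get a distinct hash. */
	if (recirc_id)
		skb_hash = jhash_1word(skb_hash, recirc_id);

	return skb_hash;
}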
@@ -607,9 +619,8 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
 {
 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
 	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
 	u32 __always_unused n_mask_hit;
-	u32 __always_unused index;
+	u32 index = 0;
 
 	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
 }