net: openvswitch: optimize flow mask cache hash collision
Port the code to Linux upstream with minor changes.

Pravin B Shelar says:
| In case of a hash collision on the mask cache, OVS does an extra
| flow lookup. The following patch avoids it.
Link: 0e6efbe271
Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Tested-by: Greg Rose <gvrose8192@gmail.com>
Signed-off-by: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit a7f35e78e7
parent 1689754de6
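The core of the change is the reworked flow_lookup() in the diff below: it first tries the mask at the caller-supplied cached index, and only then scans the rest of the mask array, skipping the index it already tried. Here is a minimal standalone sketch of just that iteration order; it is not the kernel code, and MA_MAX, mask_matches() and flow_lookup_order() are illustrative stand-ins (the real lookup hashes the masked key into the flow table instance):

#include <stdio.h>

#define MA_MAX 8	/* illustrative mask array size */

/* Stub for masked_flow_lookup(): pretend only mask 5 matches the key. */
static int mask_matches(unsigned int mask_index)
{
	return mask_index == 5;
}

/* Mirrors only the iteration order of the patched flow_lookup(). */
static int flow_lookup_order(const int *mask_present, unsigned int *index)
{
	unsigned int i;

	/* Fast path: the mask index cached from an earlier lookup. */
	if (*index < MA_MAX && mask_present[*index] && mask_matches(*index))
		return 1;

	/* Slow path: the remaining masks, skipping the one already tried. */
	for (i = 0; i < MA_MAX; i++) {
		if (i == *index || !mask_present[i])
			continue;
		if (mask_matches(i)) {
			*index = i;	/* refresh the cached index */
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	int mask_present[MA_MAX] = { 1, 0, 1, 0, 0, 1, 0, 0 };
	unsigned int index = 2;		/* stale cached index */

	if (flow_lookup_order(mask_present, &index))
		printf("hit, cached index is now %u\n", index);	/* prints 5 */
	return 0;
}

When the fallback scan hits, the cached index is refreshed, so the next packet of the same flow takes the fast path.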
@@ -508,6 +508,9 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
 	return NULL;
 }
 
+/* Flow lookup does full lookup on flow table. It starts with
+ * mask from index passed in *index.
+ */
 static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				   struct table_instance *ti,
 				   struct mask_array *ma,
@@ -515,19 +518,32 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				   u32 *n_mask_hit,
 				   u32 *index)
 {
+	struct sw_flow_mask *mask;
 	struct sw_flow *flow;
 	int i;
 
-	for (i = 0; i < ma->max; i++) {
-		struct sw_flow_mask *mask;
-
-		mask = rcu_dereference_ovsl(ma->masks[i]);
-		if (mask) {
-			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
-			if (flow) { /* Found */
-				*index = i;
-				return flow;
-			}
+	if (*index < ma->max) {
+		mask = rcu_dereference_ovsl(ma->masks[*index]);
+		if (mask) {
+			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+			if (flow)
+				return flow;
+		}
+	}
+
+	for (i = 0; i < ma->max; i++) {
+
+		if (i == *index)
+			continue;
+
+		mask = rcu_dereference_ovsl(ma->masks[i]);
+		if (!mask)
+			continue;
+
+		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+		if (flow) { /* Found */
+			*index = i;
+			return flow;
 		}
 	}
 
@@ -546,58 +562,54 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 					  u32 skb_hash,
 					  u32 *n_mask_hit)
 {
-	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
-	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
-	struct mask_cache_entry *entries, *ce, *del;
+	struct mask_array *ma = rcu_dereference(tbl->mask_array);
+	struct table_instance *ti = rcu_dereference(tbl->ti);
+	struct mask_cache_entry *entries, *ce;
 	struct sw_flow *flow;
-	u32 hash = skb_hash;
+	u32 hash;
 	int seg;
 
 	*n_mask_hit = 0;
 	if (unlikely(!skb_hash)) {
-		u32 __always_unused mask_index;
+		u32 mask_index = 0;
 
 		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
 	}
 
-	del = NULL;
-
+	/* Pre and post recirulation flows usually have the same skb_hash
+	 * value. To avoid hash collisions, rehash the 'skb_hash' with
+	 * 'recirc_id'. */
+	if (key->recirc_id)
+		skb_hash = jhash_1word(skb_hash, key->recirc_id);
+
+	ce = NULL;
+	hash = skb_hash;
 	entries = this_cpu_ptr(tbl->mask_cache);
 
+	/* Find the cache entry 'ce' to operate on. */
 	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
-		int index;
-
-		index = hash & (MC_HASH_ENTRIES - 1);
-		ce = &entries[index];
-
-		if (ce->skb_hash == skb_hash) {
-			struct sw_flow_mask *mask;
-			struct sw_flow *flow;
-
-			mask = rcu_dereference_ovsl(ma->masks[ce->mask_index]);
-			if (mask) {
-				flow = masked_flow_lookup(ti, key, mask,
-							  n_mask_hit);
-				if (flow)  /* Found */
-					return flow;
-			}
-
-			del = ce;
-			break;
+		int index = hash & (MC_HASH_ENTRIES - 1);
+		struct mask_cache_entry *e;
+
+		e = &entries[index];
+		if (e->skb_hash == skb_hash) {
+			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
+					   &e->mask_index);
+			if (!flow)
+				e->skb_hash = 0;
+			return flow;
 		}
 
-		if (!del || (del->skb_hash && !ce->skb_hash) ||
-		    (rcu_dereference_ovsl(ma->masks[del->mask_index]) &&
-		     !rcu_dereference_ovsl(ma->masks[ce->mask_index]))) {
-			del = ce;
-		}
+		if (!ce || e->skb_hash < ce->skb_hash)
+			ce = e;  /* A better replacement cache candidate. */
 
 		hash >>= MC_HASH_SHIFT;
 	}
 
-	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &del->mask_index);
+	/* Cache miss, do full lookup. */
+	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
 	if (flow)
-		del->skb_hash = skb_hash;
+		ce->skb_hash = skb_hash;
 
 	return flow;
 }
@@ -607,9 +619,8 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
 {
 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
 	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
-
 	u32 __always_unused n_mask_hit;
-	u32 __always_unused index;
+	u32 index = 0;
 
 	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
 }
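The other half of the patch, in the ovs_flow_tbl_lookup_stats() hunk above, is the per-CPU mask cache policy: probe MC_HASH_SEGS slots derived from skb_hash, keep the weakest slot seen (smallest skb_hash, so empty slots are preferred) as the replacement candidate, and rehash skb_hash with the recirculation id so pre- and post-recirculation packets do not collide on one slot. Below is a compilable toy model of that policy, not the kernel code: the MC_* values mirror flow_table.c, jhash_1word() is replaced by an arbitrary integer mix, RCU and the flow table itself are omitted, and rehash()/probe() are illustrative names:

#include <stdint.h>
#include <stdio.h>

#define MC_HASH_ENTRIES	256	/* power of two, as in flow_table.c */
#define MC_HASH_SHIFT	8
#define MC_HASH_SEGS	(32 / MC_HASH_SHIFT)

struct mask_cache_entry {
	uint32_t skb_hash;	/* 0 marks an empty slot */
	uint32_t mask_index;
};

static struct mask_cache_entry cache[MC_HASH_ENTRIES];

/* Stand-in for jhash_1word(): any decent integer mix will do here. */
static uint32_t rehash(uint32_t skb_hash, uint32_t recirc_id)
{
	return recirc_id ? (skb_hash * 2654435761u) ^ recirc_id : skb_hash;
}

/* Probe MC_HASH_SEGS slots derived from skb_hash. On a hit, return the
 * matching entry; on a miss, return the weakest slot seen (smallest
 * skb_hash, so empty slots are preferred) as the replacement candidate.
 */
static struct mask_cache_entry *probe(uint32_t skb_hash, int *hit)
{
	struct mask_cache_entry *ce = NULL;
	uint32_t hash = skb_hash;
	int seg;

	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		struct mask_cache_entry *e;

		e = &cache[hash & (MC_HASH_ENTRIES - 1)];
		if (e->skb_hash == skb_hash) {
			*hit = 1;
			return e;	/* try e->mask_index first */
		}
		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e;		/* better replacement candidate */
		hash >>= MC_HASH_SHIFT;
	}
	*hit = 0;
	return ce;
}

int main(void)
{
	uint32_t h = rehash(0xdeadbeef, 1);	/* skb_hash, recirc_id */
	int hit;
	struct mask_cache_entry *e;

	e = probe(h, &hit);		/* first packet of the flow: miss */
	if (!hit) {
		e->skb_hash = h;	/* refill after a successful full lookup, */
		e->mask_index = 3;	/* pretending mask 3 matched */
	}
	e = probe(h, &hit);		/* next packet of the flow: hit */
	printf("%s, mask_index %u\n", hit ? "hit" : "miss",
	       (unsigned int)e->mask_index);
	return 0;
}

On a miss, the kernel does the full flow_lookup() and, only if it succeeds, stores the rehashed skb_hash and the winning mask index into the candidate slot; a failed lookup through a cached entry clears that entry rather than leaving it stale.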