net: sched: cls_flower: implement offload tcf_proto_op

Add the reoffload tcf_proto_op in flower to generate an offload message
for each filter in the given tcf_proto. Call the specified callback with
this new offload message. The function returns an error only if the
callback rejects adding a 'hardware only' (skip_sw) rule.
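
The callback invoked here has the usual tc_setup_cb_t shape. As a
hypothetical illustration only (the example_* names are invented and are
not part of this patch), a driver-side callback that can fail a replace
when its flow tables are full might look like:

#include <linux/errno.h>
#include <net/pkt_cls.h>

/* Hypothetical driver callback -- a sketch, not from this patch. */
static int example_setup_tc_block_cb(enum tc_setup_type type,
				     void *type_data, void *cb_priv)
{
	struct tc_cls_flower_offload *cls_flower = type_data;
	struct example_priv *priv = cb_priv;		/* invented type */

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		/* May fail, e.g. when the device is out of resources. */
		return example_add_flow(priv, cls_flower);	/* invented */
	case TC_CLSFLOWER_DESTROY:
		example_del_flow(priv, cls_flower);		/* invented */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

If such a failure happens while adding a skip_sw filter, fl_reoffload()
below propagates the error; a filter that also exists in software is
simply skipped and the walk continues.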

A filter contains a flag indicating whether or not it is in hardware. To
ensure the reoffload function maintains this flag correctly, keep a
reference count of the number of instances of the filter that are in
hardware, and update the flag only when this count changes from or to 0.
Add a generic helper function to implement this behaviour.
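
The intended semantics of the helper can be seen in a tiny standalone
userspace rendering (a sketch with stand-in inc/dec functions, not
kernel code): the "in hardware" indication flips only when the per-filter
count crosses zero.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for tcf_block_offload_inc()/tcf_block_offload_dec():
 * here they just flip a boolean "in hardware" flag.
 */
static void offload_inc(bool *in_hw) { *in_hw = true; }
static void offload_dec(bool *in_hw) { *in_hw = false; }

/* Same logic as the new tc_cls_offload_cnt_update() helper:
 * the flag changes only when the count moves from or to 0.
 */
static void cnt_update(bool *in_hw, unsigned int *cnt, bool add)
{
	if (add) {
		if (!*cnt)
			offload_inc(in_hw);
		(*cnt)++;
	} else {
		(*cnt)--;
		if (!*cnt)
			offload_dec(in_hw);
	}
}

int main(void)
{
	bool in_hw = false;
	unsigned int cnt = 0;

	cnt_update(&in_hw, &cnt, true);   /* offloaded to 1st device: flag set */
	cnt_update(&in_hw, &cnt, true);   /* 2nd device: count 2, flag unchanged */
	cnt_update(&in_hw, &cnt, false);  /* removed from one: count 1, flag kept */
	printf("cnt=%u in_hw=%d\n", cnt, in_hw);   /* cnt=1 in_hw=1 */
	cnt_update(&in_hw, &cnt, false);  /* removed from last: flag cleared */
	printf("cnt=%u in_hw=%d\n", cnt, in_hw);   /* cnt=0 in_hw=0 */
	return 0;
}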

Signed-off-by: John Hurley <john.hurley@netronome.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 31533cba43 (parent e56185c78b)
Author: John Hurley, 2018-06-25 14:30:06 -07:00; committed by David S. Miller
2 files changed, 59 insertions(+), 0 deletions(-)

--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h

@@ -336,6 +336,21 @@ static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
 	block->offloadcnt--;
 }
 
+static inline void
+tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
+			  u32 *flags, bool add)
+{
+	if (add) {
+		if (!*cnt)
+			tcf_block_offload_inc(block, flags);
+		(*cnt)++;
+	} else {
+		(*cnt)--;
+		if (!*cnt)
+			tcf_block_offload_dec(block, flags);
+	}
+}
+
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
 	struct qdisc_skb_cb *qcb;

--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c

@@ -87,6 +87,7 @@ struct cls_fl_filter {
 	struct list_head list;
 	u32 handle;
 	u32 flags;
+	unsigned int in_hw_count;
 	struct rcu_work rwork;
 	struct net_device *hw_dev;
 };
@@ -289,6 +290,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
 		fl_hw_destroy_filter(tp, f, NULL);
 		return err;
 	} else if (err > 0) {
+		f->in_hw_count = err;
 		tcf_block_offload_inc(block, &f->flags);
 	}
 
@@ -1087,6 +1089,47 @@ skip:
 	}
 }
 
+static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
+			void *cb_priv, struct netlink_ext_ack *extack)
+{
+	struct cls_fl_head *head = rtnl_dereference(tp->root);
+	struct tc_cls_flower_offload cls_flower = {};
+	struct tcf_block *block = tp->chain->block;
+	struct fl_flow_mask *mask;
+	struct cls_fl_filter *f;
+	int err;
+
+	list_for_each_entry(mask, &head->masks, list) {
+		list_for_each_entry(f, &mask->filters, list) {
+			if (tc_skip_hw(f->flags))
+				continue;
+
+			tc_cls_common_offload_init(&cls_flower.common, tp,
+						   f->flags, extack);
+			cls_flower.command = add ?
+				TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
+			cls_flower.cookie = (unsigned long)f;
+			cls_flower.dissector = &mask->dissector;
+			cls_flower.mask = &f->mkey;
+			cls_flower.key = &f->key;
+			cls_flower.exts = &f->exts;
+			cls_flower.classid = f->res.classid;
+
+			err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+			if (err) {
+				if (add && tc_skip_sw(f->flags))
+					return err;
+				continue;
+			}
+
+			tc_cls_offload_cnt_update(block, &f->in_hw_count,
+						  &f->flags, add);
+		}
+	}
+
+	return 0;
+}
+
 static int fl_dump_key_val(struct sk_buff *skb,
 			   void *val, int val_type,
 			   void *mask, int mask_type, int len)
@@ -1438,6 +1481,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
 	.change		= fl_change,
 	.delete		= fl_delete,
 	.walk		= fl_walk,
+	.reoffload	= fl_reoffload,
 	.dump		= fl_dump,
 	.bind_class	= fl_bind_class,
 	.owner		= THIS_MODULE,