From 242453c227d14751fed0a8809a58f1bf3c7d837a Mon Sep 17 00:00:00 2001
From: wenxu
Date: Wed, 7 Aug 2019 09:13:49 +0800
Subject: [PATCH 1/6] cls_api: modify the tc_indr_block_ing_cmd parameters.

Change the tc_indr_block_ing_cmd() parameters so that it no longer
accesses struct tc_indr_block_dev or struct tc_indr_block_cb directly:
the device, block, callback and callback private data are now passed
in explicitly.

Signed-off-by: wenxu
Acked-by: Jakub Kicinski
Signed-off-by: David S. Miller
---
 net/sched/cls_api.c | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 9d85d3295c7c..1dd210d353b7 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -677,26 +677,28 @@ static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
 static int tcf_block_setup(struct tcf_block *block,
                            struct flow_block_offload *bo);
 
-static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
-                                  struct tc_indr_block_cb *indr_block_cb,
+static void tc_indr_block_ing_cmd(struct net_device *dev,
+                                  struct tcf_block *block,
+                                  tc_indr_block_bind_cb_t *cb,
+                                  void *cb_priv,
                                   enum flow_block_command command)
 {
         struct flow_block_offload bo = {
                 .command        = command,
                 .binder_type    = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
-                .net            = dev_net(indr_dev->dev),
-                .block_shared   = tcf_block_non_null_shared(indr_dev->block),
+                .net            = dev_net(dev),
+                .block_shared   = tcf_block_non_null_shared(block),
         };
         INIT_LIST_HEAD(&bo.cb_list);
 
-        if (!indr_dev->block)
+        if (!block)
                 return;
 
-        bo.block = &indr_dev->block->flow_block;
+        bo.block = &block->flow_block;
 
-        indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
-                          &bo);
-        tcf_block_setup(indr_dev->block, &bo);
+        cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);
+
+        tcf_block_setup(block, &bo);
 }
 
 int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
@@ -715,7 +717,8 @@ int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
         if (err)
                 goto err_dev_put;
 
-        tc_indr_block_ing_cmd(indr_dev, indr_block_cb, FLOW_BLOCK_BIND);
+        tc_indr_block_ing_cmd(dev, indr_dev->block, cb, cb_priv,
+                              FLOW_BLOCK_BIND);
         return 0;
 
 err_dev_put:
@@ -752,7 +755,8 @@ void __tc_indr_block_cb_unregister(struct net_device *dev,
                 return;
 
         /* Send unbind message if required to free any block cbs. */
-        tc_indr_block_ing_cmd(indr_dev, indr_block_cb, FLOW_BLOCK_UNBIND);
+        tc_indr_block_ing_cmd(dev, indr_dev->block, cb, indr_block_cb->cb_priv,
+                              FLOW_BLOCK_UNBIND);
         tc_indr_block_cb_del(indr_block_cb);
         tc_indr_block_dev_put(indr_dev);
 }
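For reference while reading the rest of the series: with the new parameter
list, any caller that can supply a (dev, block, cb, cb_priv) tuple can drive
the ingress command helper. A minimal sketch follows; example_bind() is
hypothetical, and tc_indr_block_ing_cmd() remains static to cls_api.c, so
this is illustrative only:

    /* Hypothetical caller: no tc_indr_block_dev/tc_indr_block_cb lookup
     * is needed any more, the tuple is passed in explicitly.
     */
    static void example_bind(struct net_device *dev, struct tcf_block *block,
                             tc_indr_block_bind_cb_t *cb, void *cb_priv)
    {
            tc_indr_block_ing_cmd(dev, block, cb, cb_priv, FLOW_BLOCK_BIND);
    }
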
From f8436988574e50fe69f6f193ee76c3185b17a5c7 Mon Sep 17 00:00:00 2001
From: wenxu
Date: Wed, 7 Aug 2019 09:13:50 +0800
Subject: [PATCH 2/6] cls_api: remove the tcf_block cache

Remove the tcf_block cached in struct tc_indr_block_dev, in preparation
for multi-subsystem support; the ingress block is now looked up via
tc_dev_ingress_block() where needed.

Signed-off-by: wenxu
Acked-by: Jakub Kicinski
Signed-off-by: David S. Miller
---
 net/sched/cls_api.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 1dd210d353b7..12eaa6c90e79 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -574,7 +574,6 @@ struct tc_indr_block_dev {
         struct net_device *dev;
         unsigned int refcnt;
         struct list_head cb_list;
-        struct tcf_block *block;
 };
 
 struct tc_indr_block_cb {
@@ -611,7 +610,6 @@ static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
 
         INIT_LIST_HEAD(&indr_dev->cb_list);
         indr_dev->dev = dev;
-        indr_dev->block = tc_dev_ingress_block(dev);
         if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
                                    tc_indr_setup_block_ht_params)) {
                 kfree(indr_dev);
@@ -706,6 +704,7 @@ int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
 {
         struct tc_indr_block_cb *indr_block_cb;
         struct tc_indr_block_dev *indr_dev;
+        struct tcf_block *block;
         int err;
 
         indr_dev = tc_indr_block_dev_get(dev);
@@ -717,8 +716,9 @@ int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
         if (err)
                 goto err_dev_put;
 
-        tc_indr_block_ing_cmd(dev, indr_dev->block, cb, cb_priv,
-                              FLOW_BLOCK_BIND);
+        block = tc_dev_ingress_block(dev);
+        tc_indr_block_ing_cmd(dev, block, indr_block_cb->cb,
+                              indr_block_cb->cb_priv, FLOW_BLOCK_BIND);
         return 0;
 
 err_dev_put:
@@ -745,6 +745,7 @@ void __tc_indr_block_cb_unregister(struct net_device *dev,
 {
         struct tc_indr_block_cb *indr_block_cb;
         struct tc_indr_block_dev *indr_dev;
+        struct tcf_block *block;
 
         indr_dev = tc_indr_block_dev_lookup(dev);
         if (!indr_dev)
@@ -755,8 +756,9 @@ void __tc_indr_block_cb_unregister(struct net_device *dev,
                 return;
 
         /* Send unbind message if required to free any block cbs. */
-        tc_indr_block_ing_cmd(dev, indr_dev->block, cb, indr_block_cb->cb_priv,
-                              FLOW_BLOCK_UNBIND);
+        block = tc_dev_ingress_block(dev);
+        tc_indr_block_ing_cmd(dev, block, indr_block_cb->cb,
+                              indr_block_cb->cb_priv, FLOW_BLOCK_UNBIND);
         tc_indr_block_cb_del(indr_block_cb);
         tc_indr_block_dev_put(indr_dev);
 }
@@ -792,8 +794,6 @@ static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
         if (!indr_dev)
                 return;
 
-        indr_dev->block = command == FLOW_BLOCK_BIND ? block : NULL;
-
         list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
                 indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
                                   &bo);
From e4da91021153b78b9f2972212610bc71263925f4 Mon Sep 17 00:00:00 2001
From: wenxu
Date: Wed, 7 Aug 2019 09:13:51 +0800
Subject: [PATCH 3/6] cls_api: add flow_indr_block_call function

Add a flow_indr_block_call() helper so that tc_indr_block_call() no
longer accesses struct tc_indr_block_cb and struct tc_indr_block_dev
directly.

Signed-off-by: wenxu
Acked-by: Jakub Kicinski
Signed-off-by: David S. Miller
---
 net/sched/cls_api.c | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 12eaa6c90e79..7c34fc6851c3 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -773,13 +773,27 @@ void tc_indr_block_cb_unregister(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);
 
+static void flow_indr_block_call(struct net_device *dev,
+                                 struct flow_block_offload *bo,
+                                 enum flow_block_command command)
+{
+        struct tc_indr_block_cb *indr_block_cb;
+        struct tc_indr_block_dev *indr_dev;
+
+        indr_dev = tc_indr_block_dev_lookup(dev);
+        if (!indr_dev)
+                return;
+
+        list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
+                indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
+                                  bo);
+}
+
 static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
                                struct tcf_block_ext_info *ei,
                                enum flow_block_command command,
                                struct netlink_ext_ack *extack)
 {
-        struct tc_indr_block_cb *indr_block_cb;
-        struct tc_indr_block_dev *indr_dev;
         struct flow_block_offload bo = {
                 .command        = command,
                 .binder_type    = ei->binder_type,
@@ -790,14 +804,7 @@ static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
         };
         INIT_LIST_HEAD(&bo.cb_list);
 
-        indr_dev = tc_indr_block_dev_lookup(dev);
-        if (!indr_dev)
-                return;
-
-        list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
-                indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
-                                  &bo);
-
+        flow_indr_block_call(dev, &bo, command);
         tcf_block_setup(block, &bo);
 }
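For context, a sketch of the driver-side callback that flow_indr_block_call()
invokes for each registration. The names are hypothetical; the pattern mirrors
the nfp and mlx5 callbacks touched later in this series:

    /* Sketch: a driver callback bound via __tc_indr_block_cb_register().
     * For TC_SETUP_BLOCK, type_data is the flow_block_offload built by
     * the core; example_setup_indr_block() is a placeholder.
     */
    static int example_indr_setup_tc_cb(struct net_device *dev, void *cb_priv,
                                        enum tc_setup_type type,
                                        void *type_data)
    {
            switch (type) {
            case TC_SETUP_BLOCK:
                    return example_setup_indr_block(dev, cb_priv, type_data);
            default:
                    return -EOPNOTSUPP;
            }
    }
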
From 4e481908c51bf02457aecdedc2d80e1be22e0146 Mon Sep 17 00:00:00 2001
From: wenxu
Date: Wed, 7 Aug 2019 09:13:52 +0800
Subject: [PATCH 4/6] flow_offload: move tc indirect block to flow offload

Move the tc indirect block infrastructure to flow_offload and rename it
to flow indirect block, so that nf_tables can also use the indirect
block architecture.

Signed-off-by: wenxu
Acked-by: Jakub Kicinski
Signed-off-by: David S. Miller
---
 .../net/ethernet/mellanox/mlx5/core/en_rep.c  |  10 +-
 .../ethernet/netronome/nfp/flower/offload.c   |  11 +-
 include/net/flow_offload.h                    |  29 ++
 include/net/pkt_cls.h                         |  35 ---
 include/net/sch_generic.h                     |   3 -
 net/core/flow_offload.c                       | 215 ++++++++++++++
 net/sched/cls_api.c                           | 266 +++---------------
 7 files changed, 293 insertions(+), 276 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index fbb9de633578..b7f113e996e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -781,9 +781,9 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
 {
         int err;
 
-        err = __tc_indr_block_cb_register(netdev, rpriv,
-                                          mlx5e_rep_indr_setup_tc_cb,
-                                          rpriv);
+        err = __flow_indr_block_cb_register(netdev, rpriv,
+                                            mlx5e_rep_indr_setup_tc_cb,
+                                            rpriv);
         if (err) {
                 struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
 
@@ -796,8 +796,8 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
 static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
                                             struct net_device *netdev)
 {
-        __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
-                                      rpriv);
+        __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
+                                        rpriv);
 }
 
 static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index ff8a9f1a38f8..3a4f4f042ae7 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1649,16 +1649,17 @@ int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
                 return NOTIFY_OK;
 
         if (event == NETDEV_REGISTER) {
-                err = __tc_indr_block_cb_register(netdev, app,
-                                                  nfp_flower_indr_setup_tc_cb,
-                                                  app);
+                err = __flow_indr_block_cb_register(netdev, app,
+                                                    nfp_flower_indr_setup_tc_cb,
+                                                    app);
                 if (err)
                         nfp_flower_cmsg_warn(app,
                                              "Indirect block reg failed - %s\n",
                                              netdev->name);
         } else if (event == NETDEV_UNREGISTER) {
-                __tc_indr_block_cb_unregister(netdev,
-                                              nfp_flower_indr_setup_tc_cb, app);
+                __flow_indr_block_cb_unregister(netdev,
+                                                nfp_flower_indr_setup_tc_cb,
+                                                app);
         }
 
         return NOTIFY_OK;
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index d3b12bc8a114..46b8777ad05d 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -4,6 +4,7 @@
 #include
 #include
 #include
+#include
 
 struct flow_match {
         struct flow_dissector *dissector;
@@ -370,4 +371,32 @@ static inline void flow_block_init(struct flow_block *flow_block)
         INIT_LIST_HEAD(&flow_block->cb_list);
 }
 
+typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
+                                      enum tc_setup_type type, void *type_data);
+
+typedef void flow_indr_block_ing_cmd_t(struct net_device *dev,
+                                       flow_indr_block_bind_cb_t *cb,
+                                       void *cb_priv,
+                                       enum flow_block_command command);
+
+int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+                                  flow_indr_block_bind_cb_t *cb,
+                                  void *cb_ident);
+
+void __flow_indr_block_cb_unregister(struct net_device *dev,
+                                     flow_indr_block_bind_cb_t *cb,
+                                     void *cb_ident);
+
+int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+                                flow_indr_block_bind_cb_t *cb, void *cb_ident);
+
+void flow_indr_block_cb_unregister(struct net_device *dev,
+                                   flow_indr_block_bind_cb_t *cb,
+                                   void *cb_ident);
+
+void flow_indr_block_call(struct net_device *dev,
+                          flow_indr_block_ing_cmd_t *cb,
+                          struct flow_block_offload *bo,
+                          enum flow_block_command command);
+
 #endif /* _NET_FLOW_OFFLOAD_H */
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index e429809ca90d..0790a4ed909c 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -70,15 +70,6 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
         return block->q;
 }
 
-int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
-                                tc_indr_block_bind_cb_t *cb, void *cb_ident);
-int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
-                              tc_indr_block_bind_cb_t *cb, void *cb_ident);
-void __tc_indr_block_cb_unregister(struct net_device *dev,
-                                   tc_indr_block_bind_cb_t *cb, void *cb_ident);
-void tc_indr_block_cb_unregister(struct net_device *dev,
-                                 tc_indr_block_bind_cb_t *cb, void *cb_ident);
-
 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                  struct tcf_result *res, bool compat_mode);
 
@@ -137,32 +128,6 @@ void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
 {
 }
 
-static inline
-int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
-                                tc_indr_block_bind_cb_t *cb, void *cb_ident)
-{
-        return 0;
-}
-
-static inline
-int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
-                              tc_indr_block_bind_cb_t *cb, void *cb_ident)
-{
-        return 0;
-}
-
-static inline
-void __tc_indr_block_cb_unregister(struct net_device *dev,
-                                   tc_indr_block_bind_cb_t *cb, void *cb_ident)
-{
-}
-
-static inline
-void tc_indr_block_cb_unregister(struct net_device *dev,
-                                 tc_indr_block_bind_cb_t *cb, void *cb_ident)
-{
-}
-
 static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                                struct tcf_result *res, bool compat_mode)
 {
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6b6b01234dd9..d9f359af0b93 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -23,9 +23,6 @@ struct tcf_walker;
 struct module;
 struct bpf_flow_keys;
 
-typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
-                                    enum tc_setup_type type, void *type_data);
-
 struct qdisc_rate_table {
         struct tc_ratespec rate;
         u32 data[256];
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index d63b970784dc..4cc18e462d50 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -2,6 +2,7 @@
 #include
 #include
 #include
+#include
 
 struct flow_rule *flow_rule_alloc(unsigned int num_actions)
 {
@@ -280,3 +281,217 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
         }
 }
 EXPORT_SYMBOL(flow_block_cb_setup_simple);
+
+static struct rhashtable indr_setup_block_ht;
+
+struct flow_indr_block_cb {
+        struct list_head list;
+        void *cb_priv;
+        flow_indr_block_bind_cb_t *cb;
+        void *cb_ident;
+};
+
+struct flow_indr_block_dev {
+        struct rhash_head ht_node;
+        struct net_device *dev;
+        unsigned int refcnt;
+        flow_indr_block_ing_cmd_t *block_ing_cmd_cb;
+        struct list_head cb_list;
+};
+
+static const struct rhashtable_params flow_indr_setup_block_ht_params = {
+        .key_offset     = offsetof(struct flow_indr_block_dev, dev),
+        .head_offset    = offsetof(struct flow_indr_block_dev, ht_node),
+        .key_len        = sizeof(struct net_device *),
+};
+
+static struct flow_indr_block_dev *
+flow_indr_block_dev_lookup(struct net_device *dev)
+{
+        return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
+                                      flow_indr_setup_block_ht_params);
+}
+
+static struct flow_indr_block_dev *
+flow_indr_block_dev_get(struct net_device *dev)
+{
+        struct flow_indr_block_dev *indr_dev;
+
+        indr_dev = flow_indr_block_dev_lookup(dev);
+        if (indr_dev)
+                goto inc_ref;
+
+        indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
+        if (!indr_dev)
+                return NULL;
+
+        INIT_LIST_HEAD(&indr_dev->cb_list);
+        indr_dev->dev = dev;
+        if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
+                                   flow_indr_setup_block_ht_params)) {
+                kfree(indr_dev);
+                return NULL;
+        }
+
+inc_ref:
+        indr_dev->refcnt++;
+        return indr_dev;
+}
+
+static void flow_indr_block_dev_put(struct flow_indr_block_dev *indr_dev)
+{
+        if (--indr_dev->refcnt)
+                return;
+
+        rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
+                               flow_indr_setup_block_ht_params);
+        kfree(indr_dev);
+}
+
+static struct flow_indr_block_cb *
+flow_indr_block_cb_lookup(struct flow_indr_block_dev *indr_dev,
+                          flow_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+        struct flow_indr_block_cb *indr_block_cb;
+
+        list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
+                if (indr_block_cb->cb == cb &&
+                    indr_block_cb->cb_ident == cb_ident)
+                        return indr_block_cb;
+        return NULL;
+}
+
+static struct flow_indr_block_cb *
+flow_indr_block_cb_add(struct flow_indr_block_dev *indr_dev, void *cb_priv,
+                       flow_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+        struct flow_indr_block_cb *indr_block_cb;
+
+        indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
+        if (indr_block_cb)
+                return ERR_PTR(-EEXIST);
+
+        indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
+        if (!indr_block_cb)
+                return ERR_PTR(-ENOMEM);
+
+        indr_block_cb->cb_priv = cb_priv;
+        indr_block_cb->cb = cb;
+        indr_block_cb->cb_ident = cb_ident;
+        list_add(&indr_block_cb->list, &indr_dev->cb_list);
+
+        return indr_block_cb;
+}
+
+static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
+{
+        list_del(&indr_block_cb->list);
+        kfree(indr_block_cb);
+}
+
+int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+                                  flow_indr_block_bind_cb_t *cb,
+                                  void *cb_ident)
+{
+        struct flow_indr_block_cb *indr_block_cb;
+        struct flow_indr_block_dev *indr_dev;
+        int err;
+
+        indr_dev = flow_indr_block_dev_get(dev);
+        if (!indr_dev)
+                return -ENOMEM;
+
+        indr_block_cb = flow_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
+        err = PTR_ERR_OR_ZERO(indr_block_cb);
+        if (err)
+                goto err_dev_put;
+
+        if (indr_dev->block_ing_cmd_cb)
+                indr_dev->block_ing_cmd_cb(dev, indr_block_cb->cb,
+                                           indr_block_cb->cb_priv,
+                                           FLOW_BLOCK_BIND);
+
+        return 0;
+
+err_dev_put:
+        flow_indr_block_dev_put(indr_dev);
+        return err;
+}
+EXPORT_SYMBOL_GPL(__flow_indr_block_cb_register);
+
+int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+                                flow_indr_block_bind_cb_t *cb,
+                                void *cb_ident)
+{
+        int err;
+
+        rtnl_lock();
+        err = __flow_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
+        rtnl_unlock();
+
+        return err;
+}
+EXPORT_SYMBOL_GPL(flow_indr_block_cb_register);
+
+void __flow_indr_block_cb_unregister(struct net_device *dev,
+                                     flow_indr_block_bind_cb_t *cb,
+                                     void *cb_ident)
+{
+        struct flow_indr_block_cb *indr_block_cb;
+        struct flow_indr_block_dev *indr_dev;
+
+        indr_dev = flow_indr_block_dev_lookup(dev);
+        if (!indr_dev)
+                return;
+
+        indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
+        if (!indr_block_cb)
+                return;
+
+        if (indr_dev->block_ing_cmd_cb)
+                indr_dev->block_ing_cmd_cb(dev, indr_block_cb->cb,
+                                           indr_block_cb->cb_priv,
+                                           FLOW_BLOCK_UNBIND);
+
+        flow_indr_block_cb_del(indr_block_cb);
+        flow_indr_block_dev_put(indr_dev);
+}
+EXPORT_SYMBOL_GPL(__flow_indr_block_cb_unregister);
+
+void flow_indr_block_cb_unregister(struct net_device *dev,
+                                   flow_indr_block_bind_cb_t *cb,
+                                   void *cb_ident)
+{
+        rtnl_lock();
+        __flow_indr_block_cb_unregister(dev, cb, cb_ident);
+        rtnl_unlock();
+}
+EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);
+
+void flow_indr_block_call(struct net_device *dev,
+                          flow_indr_block_ing_cmd_t cb,
+                          struct flow_block_offload *bo,
+                          enum flow_block_command command)
+{
+        struct flow_indr_block_cb *indr_block_cb;
+        struct flow_indr_block_dev *indr_dev;
+
+        indr_dev = flow_indr_block_dev_lookup(dev);
+        if (!indr_dev)
+                return;
+
+        indr_dev->block_ing_cmd_cb = command == FLOW_BLOCK_BIND
+                                     ? cb : NULL;
+
+        list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
+                indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
+                                  bo);
+}
+EXPORT_SYMBOL_GPL(flow_indr_block_call);
+
+static int __init init_flow_indr_rhashtable(void)
+{
+        return rhashtable_init(&indr_setup_block_ht,
+                               &flow_indr_setup_block_ht_params);
+}
+subsys_initcall(init_flow_indr_rhashtable);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 7c34fc6851c3..0b0dde26783d 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -37,6 +37,7 @@
 #include
 #include
 #include
+#include
 
 extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
 
@@ -545,6 +546,33 @@ static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
         }
 }
 
+static int tcf_block_setup(struct tcf_block *block,
+                           struct flow_block_offload *bo);
+
+static void tc_indr_block_ing_cmd(struct net_device *dev,
+                                  struct tcf_block *block,
+                                  flow_indr_block_bind_cb_t *cb,
+                                  void *cb_priv,
+                                  enum flow_block_command command)
+{
+        struct flow_block_offload bo = {
+                .command        = command,
+                .binder_type    = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
+                .net            = dev_net(dev),
+                .block_shared   = tcf_block_non_null_shared(block),
+        };
+        INIT_LIST_HEAD(&bo.cb_list);
+
+        if (!block)
+                return;
+
+        bo.block = &block->flow_block;
+
+        cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);
+
+        tcf_block_setup(block, &bo);
+}
+
 static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
 {
         const struct Qdisc_class_ops *cops;
@@ -567,229 +595,18 @@ static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
         return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
 }
 
-static struct rhashtable indr_setup_block_ht;
-
-struct tc_indr_block_dev {
-        struct rhash_head ht_node;
-        struct net_device *dev;
-        unsigned int refcnt;
-        struct list_head cb_list;
-};
-
-struct tc_indr_block_cb {
-        struct list_head list;
-        void *cb_priv;
-        tc_indr_block_bind_cb_t *cb;
-        void *cb_ident;
-};
-
-static const struct rhashtable_params tc_indr_setup_block_ht_params = {
-        .key_offset     = offsetof(struct tc_indr_block_dev, dev),
-        .head_offset    = offsetof(struct tc_indr_block_dev, ht_node),
-        .key_len        = sizeof(struct net_device *),
-};
-
-static struct tc_indr_block_dev *
-tc_indr_block_dev_lookup(struct net_device *dev)
+static void tc_indr_block_get_and_ing_cmd(struct net_device *dev,
+                                          flow_indr_block_bind_cb_t *cb,
+                                          void *cb_priv,
+                                          enum flow_block_command command)
 {
-        return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
-                                      tc_indr_setup_block_ht_params);
+        struct tcf_block *block = tc_dev_ingress_block(dev);
+
+        tc_indr_block_ing_cmd(dev, block, cb, cb_priv, command);
 }
 
-static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
-{
-        struct tc_indr_block_dev *indr_dev;
-
-        indr_dev = tc_indr_block_dev_lookup(dev);
-        if (indr_dev)
-                goto inc_ref;
-
-        indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
-        if (!indr_dev)
-                return NULL;
-
-        INIT_LIST_HEAD(&indr_dev->cb_list);
-        indr_dev->dev = dev;
-        if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
-                                   tc_indr_setup_block_ht_params)) {
-                kfree(indr_dev);
-                return NULL;
-        }
-
-inc_ref:
-        indr_dev->refcnt++;
-        return indr_dev;
-}
-
-static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev)
-{
-        if (--indr_dev->refcnt)
-                return;
-
-        rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
-                               tc_indr_setup_block_ht_params);
-        kfree(indr_dev);
-}
-
-static struct tc_indr_block_cb *
-tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev,
-                        tc_indr_block_bind_cb_t *cb, void *cb_ident)
-{
-        struct tc_indr_block_cb *indr_block_cb;
-
-        list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
-                if (indr_block_cb->cb == cb &&
-                    indr_block_cb->cb_ident == cb_ident)
-                        return indr_block_cb;
-        return NULL;
-}
-
-static struct tc_indr_block_cb *
-tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv,
-                     tc_indr_block_bind_cb_t *cb, void *cb_ident)
-{
-        struct tc_indr_block_cb *indr_block_cb;
-
-        indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
-        if (indr_block_cb)
-                return ERR_PTR(-EEXIST);
-
-        indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
-        if (!indr_block_cb)
-                return ERR_PTR(-ENOMEM);
-
-        indr_block_cb->cb_priv = cb_priv;
-        indr_block_cb->cb = cb;
-        indr_block_cb->cb_ident = cb_ident;
-        list_add(&indr_block_cb->list, &indr_dev->cb_list);
-
-        return indr_block_cb;
-}
-
-static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
-{
-        list_del(&indr_block_cb->list);
-        kfree(indr_block_cb);
-}
-
-static int tcf_block_setup(struct tcf_block *block,
-                           struct flow_block_offload *bo);
-
-static void tc_indr_block_ing_cmd(struct net_device *dev,
-                                  struct tcf_block *block,
-                                  tc_indr_block_bind_cb_t *cb,
-                                  void *cb_priv,
-                                  enum flow_block_command command)
-{
-        struct flow_block_offload bo = {
-                .command        = command,
-                .binder_type    = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
-                .net            = dev_net(dev),
-                .block_shared   = tcf_block_non_null_shared(block),
-        };
-        INIT_LIST_HEAD(&bo.cb_list);
-
-        if (!block)
-                return;
-
-        bo.block = &block->flow_block;
-
-        cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);
-
-        tcf_block_setup(block, &bo);
-}
-
-int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
-                                tc_indr_block_bind_cb_t *cb, void *cb_ident)
-{
-        struct tc_indr_block_cb *indr_block_cb;
-        struct tc_indr_block_dev *indr_dev;
-        struct tcf_block *block;
-        int err;
-
-        indr_dev = tc_indr_block_dev_get(dev);
-        if (!indr_dev)
-                return -ENOMEM;
-
-        indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
-        err = PTR_ERR_OR_ZERO(indr_block_cb);
-        if (err)
-                goto err_dev_put;
-
-        block = tc_dev_ingress_block(dev);
-        tc_indr_block_ing_cmd(dev, block, indr_block_cb->cb,
-                              indr_block_cb->cb_priv, FLOW_BLOCK_BIND);
-        return 0;
-
-err_dev_put:
-        tc_indr_block_dev_put(indr_dev);
-        return err;
-}
-EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register);
-
-int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
-                              tc_indr_block_bind_cb_t *cb, void *cb_ident)
-{
-        int err;
-
-        rtnl_lock();
-        err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
-        rtnl_unlock();
-
-        return err;
-}
-EXPORT_SYMBOL_GPL(tc_indr_block_cb_register);
-
-void __tc_indr_block_cb_unregister(struct net_device *dev,
-                                   tc_indr_block_bind_cb_t *cb, void *cb_ident)
-{
-        struct tc_indr_block_cb *indr_block_cb;
-        struct tc_indr_block_dev *indr_dev;
-        struct tcf_block *block;
-
-        indr_dev = tc_indr_block_dev_lookup(dev);
-        if (!indr_dev)
-                return;
-
-        indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
-        if (!indr_block_cb)
-                return;
-
-        /* Send unbind message if required to free any block cbs. */
-        block = tc_dev_ingress_block(dev);
-        tc_indr_block_ing_cmd(dev, block, indr_block_cb->cb,
-                              indr_block_cb->cb_priv, FLOW_BLOCK_UNBIND);
-        tc_indr_block_cb_del(indr_block_cb);
-        tc_indr_block_dev_put(indr_dev);
-}
-EXPORT_SYMBOL_GPL(__tc_indr_block_cb_unregister);
-
-void tc_indr_block_cb_unregister(struct net_device *dev,
-                                 tc_indr_block_bind_cb_t *cb, void *cb_ident)
-{
-        rtnl_lock();
-        __tc_indr_block_cb_unregister(dev, cb, cb_ident);
-        rtnl_unlock();
-}
-EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);
-
-static void flow_indr_block_call(struct net_device *dev,
-                                 struct flow_block_offload *bo,
-                                 enum flow_block_command command)
-{
-        struct tc_indr_block_cb *indr_block_cb;
-        struct tc_indr_block_dev *indr_dev;
-
-        indr_dev = tc_indr_block_dev_lookup(dev);
-        if (!indr_dev)
-                return;
-
-        list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
-                indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
-                                  bo);
-}
-
-static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
+static void tc_indr_block_call(struct tcf_block *block,
+                               struct net_device *dev,
                                struct tcf_block_ext_info *ei,
                                enum flow_block_command command,
                                struct netlink_ext_ack *extack)
@@ -804,7 +621,7 @@ static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
         };
         INIT_LIST_HEAD(&bo.cb_list);
 
-        flow_indr_block_call(dev, &bo, command);
+        flow_indr_block_call(dev, tc_indr_block_get_and_ing_cmd, &bo, command);
         tcf_block_setup(block, &bo);
 }
 
@@ -3378,11 +3195,6 @@ static int __init tc_filter_init(void)
         if (err)
                 goto err_register_pernet_subsys;
 
-        err = rhashtable_init(&indr_setup_block_ht,
-                              &tc_indr_setup_block_ht_params);
-        if (err)
-                goto err_rhash_setup_block_ht;
-
         rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
                       RTNL_FLAG_DOIT_UNLOCKED);
         rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
@@ -3396,8 +3208,6 @@ static int __init tc_filter_init(void)
 
         return 0;
 
-err_rhash_setup_block_ht:
-        unregister_pernet_subsys(&tcf_net_ops);
 err_register_pernet_subsys:
         destroy_workqueue(tc_filter_wq);
         return err;
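After the move, a driver consumes the renamed API from its netdevice
notifier. A sketch under assumed names (example_app and
example_indr_setup_tc_cb are placeholders; the pattern mirrors the nfp hunk
above):

    static int example_netdevice_event(struct notifier_block *nb,
                                       unsigned long event, void *ptr)
    {
            struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

            if (event == NETDEV_REGISTER) {
                    /* Runs under rtnl, so the __ variant is safe here. */
                    if (__flow_indr_block_cb_register(netdev, example_app,
                                                      example_indr_setup_tc_cb,
                                                      example_app))
                            pr_warn("indirect block register failed: %s\n",
                                    netdev->name);
            } else if (event == NETDEV_UNREGISTER) {
                    __flow_indr_block_cb_unregister(netdev,
                                                    example_indr_setup_tc_cb,
                                                    example_app);
            }

            return NOTIFY_OK;
    }
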
From 1150ab0f1b333ca310431dac65d8fa403b8471da Mon Sep 17 00:00:00 2001
From: wenxu
Date: Wed, 7 Aug 2019 09:13:53 +0800
Subject: [PATCH 5/6] flow_offload: support get multi-subsystem block

Provide a callback list so that the blocks of both the tc and nft
subsystems can be found: each subsystem registers an entry that knows
how to locate its own block for a given device.

Signed-off-by: wenxu
Acked-by: Jakub Kicinski
Signed-off-by: David S. Miller
---
 include/net/flow_offload.h | 10 +++++++-
 net/core/flow_offload.c    | 51 ++++++++++++++++++++++++++++----------
 net/sched/cls_api.c        |  9 ++++++-
 3 files changed, 55 insertions(+), 15 deletions(-)

diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 46b8777ad05d..e8069b6c474c 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -379,6 +379,15 @@ typedef void flow_indr_block_ing_cmd_t(struct net_device *dev,
                                        void *cb_priv,
                                        enum flow_block_command command);
 
+struct flow_indr_block_ing_entry {
+        flow_indr_block_ing_cmd_t *cb;
+        struct list_head list;
+};
+
+void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry);
+
+void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry);
+
 int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
                                   flow_indr_block_bind_cb_t *cb,
                                   void *cb_ident);
@@ -395,7 +404,6 @@ void flow_indr_block_cb_unregister(struct net_device *dev,
                                    void *cb_ident);
 
 void flow_indr_block_call(struct net_device *dev,
-                          flow_indr_block_ing_cmd_t *cb,
                           struct flow_block_offload *bo,
                           enum flow_block_command command);
 
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index 4cc18e462d50..64c3d4d72b9c 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -3,6 +3,7 @@
 #include
 #include
 #include
+#include
 
 struct flow_rule *flow_rule_alloc(unsigned int num_actions)
 {
@@ -282,6 +283,8 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
 }
 EXPORT_SYMBOL(flow_block_cb_setup_simple);
 
+static LIST_HEAD(block_ing_cb_list);
+
 static struct rhashtable indr_setup_block_ht;
 
 struct flow_indr_block_cb {
@@ -295,7 +298,6 @@ struct flow_indr_block_dev {
         struct rhash_head ht_node;
         struct net_device *dev;
         unsigned int refcnt;
-        flow_indr_block_ing_cmd_t *block_ing_cmd_cb;
         struct list_head cb_list;
 };
 
@@ -389,6 +391,20 @@ static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
         kfree(indr_block_cb);
 }
 
+static void flow_block_ing_cmd(struct net_device *dev,
+                               flow_indr_block_bind_cb_t *cb,
+                               void *cb_priv,
+                               enum flow_block_command command)
+{
+        struct flow_indr_block_ing_entry *entry;
+
+        rcu_read_lock();
+        list_for_each_entry_rcu(entry, &block_ing_cb_list, list) {
+                entry->cb(dev, cb, cb_priv, command);
+        }
+        rcu_read_unlock();
+}
+
 int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
                                   flow_indr_block_bind_cb_t *cb,
                                   void *cb_ident)
@@ -406,10 +422,8 @@ int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
         if (err)
                 goto err_dev_put;
 
-        if (indr_dev->block_ing_cmd_cb)
-                indr_dev->block_ing_cmd_cb(dev, indr_block_cb->cb,
-                                           indr_block_cb->cb_priv,
-                                           FLOW_BLOCK_BIND);
+        flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
+                           FLOW_BLOCK_BIND);
 
         return 0;
 
@@ -448,10 +462,8 @@ void __flow_indr_block_cb_unregister(struct net_device *dev,
         if (!indr_block_cb)
                 return;
 
-        if (indr_dev->block_ing_cmd_cb)
-                indr_dev->block_ing_cmd_cb(dev, indr_block_cb->cb,
-                                           indr_block_cb->cb_priv,
-                                           FLOW_BLOCK_UNBIND);
+        flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
+                           FLOW_BLOCK_UNBIND);
 
         flow_indr_block_cb_del(indr_block_cb);
         flow_indr_block_dev_put(indr_dev);
@@ -469,7 +481,6 @@ void flow_indr_block_cb_unregister(struct net_device *dev,
 EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);
 
 void flow_indr_block_call(struct net_device *dev,
-                          flow_indr_block_ing_cmd_t cb,
                           struct flow_block_offload *bo,
                           enum flow_block_command command)
 {
@@ -480,15 +491,29 @@ void flow_indr_block_call(struct net_device *dev,
         if (!indr_dev)
                 return;
 
-        indr_dev->block_ing_cmd_cb = command == FLOW_BLOCK_BIND
-                                     ? cb : NULL;
-
         list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
                 indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
                                   bo);
 }
 EXPORT_SYMBOL_GPL(flow_indr_block_call);
 
+static DEFINE_MUTEX(flow_indr_block_ing_cb_lock);
+void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry)
+{
+        mutex_lock(&flow_indr_block_ing_cb_lock);
+        list_add_tail_rcu(&entry->list, &block_ing_cb_list);
+        mutex_unlock(&flow_indr_block_ing_cb_lock);
+}
+EXPORT_SYMBOL_GPL(flow_indr_add_block_ing_cb);
+
+void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry)
+{
+        mutex_lock(&flow_indr_block_ing_cb_lock);
+        list_del_rcu(&entry->list);
+        mutex_unlock(&flow_indr_block_ing_cb_lock);
+}
+EXPORT_SYMBOL_GPL(flow_indr_del_block_ing_cb);
+
 static int __init init_flow_indr_rhashtable(void)
 {
         return rhashtable_init(&indr_setup_block_ht,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 0b0dde26783d..e0d8b456e9f5 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -621,7 +621,7 @@ static void tc_indr_block_call(struct tcf_block *block,
         };
         INIT_LIST_HEAD(&bo.cb_list);
 
-        flow_indr_block_call(dev, tc_indr_block_get_and_ing_cmd, &bo, command);
+        flow_indr_block_call(dev, &bo, command);
         tcf_block_setup(block, &bo);
 }
 
@@ -3183,6 +3183,11 @@ static struct pernet_operations tcf_net_ops = {
         .size = sizeof(struct tcf_net),
 };
 
+static struct flow_indr_block_ing_entry block_ing_entry = {
+        .cb = tc_indr_block_get_and_ing_cmd,
+        .list = LIST_HEAD_INIT(block_ing_entry.list),
+};
+
 static int __init tc_filter_init(void)
 {
         int err;
@@ -3195,6 +3200,8 @@ static int __init tc_filter_init(void)
         if (err)
                 goto err_register_pernet_subsys;
 
+        flow_indr_add_block_ing_cb(&block_ing_entry);
+
         rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
                       RTNL_FLAG_DOIT_UNLOCKED);
         rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
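With this patch, a subsystem plugs into the callback list with one static
entry. A sketch with placeholder names (example_get_and_ing_cmd stands in
for the subsystem's lookup-and-command helper, exactly as cls_api wires up
tc_indr_block_get_and_ing_cmd above):

    static struct flow_indr_block_ing_entry example_ing_entry = {
            .cb   = example_get_and_ing_cmd,
            .list = LIST_HEAD_INIT(example_ing_entry.list),
    };

    static int __init example_subsys_init(void)
    {
            /* Entries are walked under RCU; add/del serialize on a mutex. */
            flow_indr_add_block_ing_cb(&example_ing_entry);
            return 0;
    }

    static void __exit example_subsys_exit(void)
    {
            flow_indr_del_block_ing_cb(&example_ing_entry);
    }
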
From 9a32669fecfb484a1f78fe48d0e42a5efccb0675 Mon Sep 17 00:00:00 2001
From: wenxu
Date: Wed, 7 Aug 2019 09:13:54 +0800
Subject: [PATCH 6/6] netfilter: nf_tables_offload: support indr block call

Add indirect block call support to nf_tables. This makes nft hardware
offload work on vlan and tunnel devices. For example:

nft add table netdev firewall
nft add chain netdev firewall aclout { type filter hook ingress offload device mlx_pf0vf0 priority - 300 \; }
nft add rule netdev firewall aclout ip daddr 10.0.0.1 fwd to vlan0
nft add chain netdev firewall aclin { type filter hook ingress device vlan0 priority - 300 \; }
nft add rule netdev firewall aclin ip daddr 10.0.0.7 fwd to mlx_pf0vf0

Signed-off-by: wenxu
Acked-by: Jakub Kicinski
Signed-off-by: David S. Miller
---
 include/net/netfilter/nf_tables_offload.h |   4 +
 net/netfilter/nf_tables_api.c             |   7 +
 net/netfilter/nf_tables_offload.c         | 148 ++++++++++++++++++----
 3 files changed, 135 insertions(+), 24 deletions(-)

diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
index 3196663a10e3..bffd51a90343 100644
--- a/include/net/netfilter/nf_tables_offload.h
+++ b/include/net/netfilter/nf_tables_offload.h
@@ -63,6 +63,10 @@ struct nft_rule;
 struct nft_flow_rule *nft_flow_rule_create(const struct nft_rule *rule);
 void nft_flow_rule_destroy(struct nft_flow_rule *flow);
 int nft_flow_rule_offload_commit(struct net *net);
+void nft_indr_block_get_and_ing_cmd(struct net_device *dev,
+                                    flow_indr_block_bind_cb_t *cb,
+                                    void *cb_priv,
+                                    enum flow_block_command command);
 
 #define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)        \
         (__reg)->base_offset    =                                       \
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 605a7cfe7ca7..fe3b7b0c6c66 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -7593,6 +7593,11 @@ static struct pernet_operations nf_tables_net_ops = {
         .exit   = nf_tables_exit_net,
 };
 
+static struct flow_indr_block_ing_entry block_ing_entry = {
+        .cb = nft_indr_block_get_and_ing_cmd,
+        .list = LIST_HEAD_INIT(block_ing_entry.list),
+};
+
 static int __init nf_tables_module_init(void)
 {
         int err;
@@ -7624,6 +7629,7 @@ static int __init nf_tables_module_init(void)
                 goto err5;
 
         nft_chain_route_init();
+        flow_indr_add_block_ing_cb(&block_ing_entry);
         return err;
 err5:
         rhltable_destroy(&nft_objname_ht);
@@ -7640,6 +7646,7 @@ err1:
 
 static void __exit nf_tables_module_exit(void)
 {
+        flow_indr_del_block_ing_cb(&block_ing_entry);
         nfnetlink_subsys_unregister(&nf_tables_subsys);
         unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
         nft_chain_filter_fini();
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 64f5fd5f240e..d3c4c9c88bc8 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -171,24 +171,110 @@ static int nft_flow_offload_unbind(struct flow_block_offload *bo,
         return 0;
 }
 
+static int nft_block_setup(struct nft_base_chain *basechain,
+                           struct flow_block_offload *bo,
+                           enum flow_block_command cmd)
+{
+        int err;
+
+        switch (cmd) {
+        case FLOW_BLOCK_BIND:
+                err = nft_flow_offload_bind(bo, basechain);
+                break;
+        case FLOW_BLOCK_UNBIND:
+                err = nft_flow_offload_unbind(bo, basechain);
+                break;
+        default:
+                WARN_ON_ONCE(1);
+                err = -EOPNOTSUPP;
+        }
+
+        return err;
+}
+
+static int nft_block_offload_cmd(struct nft_base_chain *chain,
+                                 struct net_device *dev,
+                                 enum flow_block_command cmd)
+{
+        struct netlink_ext_ack extack = {};
+        struct flow_block_offload bo = {};
+        int err;
+
+        bo.net = dev_net(dev);
+        bo.block = &chain->flow_block;
+        bo.command = cmd;
+        bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
+        bo.extack = &extack;
+        INIT_LIST_HEAD(&bo.cb_list);
+
+        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
+        if (err < 0)
+                return err;
+
+        return nft_block_setup(chain, &bo, cmd);
+}
+
+static void nft_indr_block_ing_cmd(struct net_device *dev,
+                                   struct nft_base_chain *chain,
+                                   flow_indr_block_bind_cb_t *cb,
+                                   void *cb_priv,
+                                   enum flow_block_command cmd)
+{
+        struct netlink_ext_ack extack = {};
+        struct flow_block_offload bo = {};
+
+        if (!chain)
+                return;
+
+        bo.net = dev_net(dev);
+        bo.block = &chain->flow_block;
+        bo.command = cmd;
+        bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
+        bo.extack = &extack;
+        INIT_LIST_HEAD(&bo.cb_list);
+
+        cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);
+
+        nft_block_setup(chain, &bo, cmd);
+}
+
+static int nft_indr_block_offload_cmd(struct nft_base_chain *chain,
+                                      struct net_device *dev,
+                                      enum flow_block_command cmd)
+{
+        struct flow_block_offload bo = {};
+        struct netlink_ext_ack extack = {};
+
+        bo.net = dev_net(dev);
+        bo.block = &chain->flow_block;
+        bo.command = cmd;
+        bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
+        bo.extack = &extack;
+        INIT_LIST_HEAD(&bo.cb_list);
+
+        flow_indr_block_call(dev, &bo, cmd);
+
+        if (list_empty(&bo.cb_list))
+                return -EOPNOTSUPP;
+
+        return nft_block_setup(chain, &bo, cmd);
+}
+
 #define FLOW_SETUP_BLOCK TC_SETUP_BLOCK
 
 static int nft_flow_offload_chain(struct nft_trans *trans,
                                   enum flow_block_command cmd)
 {
         struct nft_chain *chain = trans->ctx.chain;
-        struct netlink_ext_ack extack = {};
-        struct flow_block_offload bo = {};
         struct nft_base_chain *basechain;
         struct net_device *dev;
-        int err;
 
         if (!nft_is_base_chain(chain))
                 return -EOPNOTSUPP;
 
         basechain = nft_base_chain(chain);
         dev = basechain->ops.dev;
-        if (!dev || !dev->netdev_ops->ndo_setup_tc)
+        if (!dev)
                 return -EOPNOTSUPP;
 
         /* Only default policy to accept is supported for now. */
@@ -197,26 +283,10 @@ static int nft_flow_offload_chain(struct nft_trans *trans,
             nft_trans_chain_policy(trans) != NF_ACCEPT)
                 return -EOPNOTSUPP;
 
-        bo.command = cmd;
-        bo.block = &basechain->flow_block;
-        bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
-        bo.extack = &extack;
-        INIT_LIST_HEAD(&bo.cb_list);
-
-        err = dev->netdev_ops->ndo_setup_tc(dev, FLOW_SETUP_BLOCK, &bo);
-        if (err < 0)
-                return err;
-
-        switch (cmd) {
-        case FLOW_BLOCK_BIND:
-                err = nft_flow_offload_bind(&bo, basechain);
-                break;
-        case FLOW_BLOCK_UNBIND:
-                err = nft_flow_offload_unbind(&bo, basechain);
-                break;
-        }
-
-        return err;
+        if (dev->netdev_ops->ndo_setup_tc)
+                return nft_block_offload_cmd(basechain, dev, cmd);
+        else
+                return nft_indr_block_offload_cmd(basechain, dev, cmd);
 }
 
 int nft_flow_rule_offload_commit(struct net *net)
@@ -266,3 +336,33 @@ int nft_flow_rule_offload_commit(struct net *net)
 
         return err;
 }
+
+void nft_indr_block_get_and_ing_cmd(struct net_device *dev,
+                                    flow_indr_block_bind_cb_t *cb,
+                                    void *cb_priv,
+                                    enum flow_block_command command)
+{
+        struct net *net = dev_net(dev);
+        const struct nft_table *table;
+        const struct nft_chain *chain;
+
+        list_for_each_entry_rcu(table, &net->nft.tables, list) {
+                if (table->family != NFPROTO_NETDEV)
+                        continue;
+
+                list_for_each_entry_rcu(chain, &table->chains, list) {
+                        if (nft_is_base_chain(chain)) {
+                                struct nft_base_chain *basechain;
+
+                                basechain = nft_base_chain(chain);
+                                if (!strncmp(basechain->dev_name, dev->name,
+                                             IFNAMSIZ)) {
+                                        nft_indr_block_ing_cmd(dev, basechain,
+                                                               cb, cb_priv,
+                                                               command);
+                                        return;
+                                }
+                        }
+                }
+        }
+}