From 7f76fa36754b08d9709ae50cd0a9477a6f998b21 Mon Sep 17 00:00:00 2001
From: John Hurley
Date: Fri, 9 Nov 2018 21:21:26 -0800
Subject: [PATCH 1/6] net: sched: register callbacks for indirect tc block binds

Currently drivers can register to receive TC block bind/unbind callbacks by implementing the setup_tc ndo in any of their given netdevs. However, drivers may also be interested in binds to higher level devices (e.g. tunnel drivers) to potentially offload filters applied to them.

Introduce indirect block devs which allow drivers to register callbacks for block binds on other devices. The callback is triggered when the device is bound to a block, allowing the driver to register for rules applied to that block using already available functions.

Freeing an indirect block callback will trigger an unbind event (if necessary) to direct the driver to remove any offloaded rules and unregister any block rule callbacks. It is the responsibility of the implementing driver to clean any registered indirect block callbacks before exiting, if the block is still active at such a time.

Allow registering an indirect block dev callback for a device that is already bound to a block. In this case (if it is an ingress block), register and also trigger the callback, meaning that any already installed rules can be replayed to the calling driver.
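As a usage sketch (illustrative only, not part of this patch; my_app, my_setup_tc_cb and my_nb are placeholder names), a driver interested in blocks bound to a tunnel netdev it does not own would typically register from its netdev notifier:

    /* Sketch: register for indirect block binds on another device.
     * RTNL is already held in netdev notifier context, so the lockless
     * __tc_indr_block_cb_register() variant can be used.  If the device
     * is already bound to an ingress block, the bind callback is
     * triggered immediately on registration.
     */
    static int my_netdev_event(struct notifier_block *my_nb,
                               unsigned long event, void *ptr)
    {
            struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
            int err;

            switch (event) {
            case NETDEV_REGISTER:
                    err = __tc_indr_block_cb_register(netdev, my_app,
                                                      my_setup_tc_cb,
                                                      netdev);
                    if (err)
                            pr_warn("indirect block register failed\n");
                    break;
            case NETDEV_UNREGISTER:
                    __tc_indr_block_cb_unregister(netdev, my_setup_tc_cb,
                                                  netdev);
                    break;
            }

            return NOTIFY_OK;
    }

The my_setup_tc_cb() callback then receives TC_SETUP_BLOCK with a struct tc_block_offload describing the bind or unbind, and can register per-block rule callbacks with the already available tcf_block_cb_register()/tcf_block_cb_unregister() helpers.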
Signed-off-by: John Hurley
Signed-off-by: Jakub Kicinski
Signed-off-by: David S. Miller
--- include/net/pkt_cls.h | 34 +++++ include/net/sch_generic.h | 3 + net/sched/cls_api.c | 256 +++++++++++++++++++++++++++++++++++++- 3 files changed, 292 insertions(+), 1 deletion(-)
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 00f71644fbcd..f6c0cd29dea4 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -81,6 +81,14 @@ void __tcf_block_cb_unregister(struct tcf_block *block, struct tcf_block_cb *block_cb); void tcf_block_cb_unregister(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident); +int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv, + tc_indr_block_bind_cb_t *cb, void *cb_ident); +int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv, + tc_indr_block_bind_cb_t *cb, void *cb_ident); +void __tc_indr_block_cb_unregister(struct net_device *dev, + tc_indr_block_bind_cb_t *cb, void *cb_ident); +void tc_indr_block_cb_unregister(struct net_device *dev, + tc_indr_block_bind_cb_t *cb, void *cb_ident); int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res, bool compat_mode); @@ -183,6 +191,32 @@ void tcf_block_cb_unregister(struct tcf_block *block, { } +static inline +int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv, + tc_indr_block_bind_cb_t *cb, void *cb_ident) +{ + return 0; +} + +static inline +int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv, + tc_indr_block_bind_cb_t *cb, void *cb_ident) +{ + return 0; +} + +static inline +void __tc_indr_block_cb_unregister(struct net_device *dev, + tc_indr_block_bind_cb_t *cb, void *cb_ident) +{ +} + +static inline +void tc_indr_block_cb_unregister(struct net_device *dev, + tc_indr_block_bind_cb_t *cb, void *cb_ident) +{ +} + static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res, bool compat_mode) {
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index a8dd1fc141b6..9481f2c142e2 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -24,6 +24,9 @@ struct bpf_flow_keys; typedef int tc_setup_cb_t(enum tc_setup_type type, void *type_data, void *cb_priv); +typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, + enum tc_setup_type type, void *type_data); + struct qdisc_rate_table { struct tc_ratespec rate; u32 data[256];
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index f427a1e00e7e..d92f44ac4c39 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -25,6 +25,7 @@ #include <linux/kmod.h> #include <linux/slab.h> #include <linux/idr.h> +#include <linux/rhashtable.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/netlink.h> @@ -365,6 +366,245 @@ static void tcf_chain_flush(struct tcf_chain *chain) } } +static struct tcf_block *tc_dev_ingress_block(struct net_device *dev) +{ + const struct Qdisc_class_ops *cops; + struct Qdisc *qdisc; + + if (!dev_ingress_queue(dev)) + return NULL; + + qdisc = dev_ingress_queue(dev)->qdisc_sleeping; + if (!qdisc) + return NULL; + + cops = qdisc->ops->cl_ops; + if (!cops) + return NULL; + + if (!cops->tcf_block) + return NULL; + + return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL); +} + +static struct rhashtable indr_setup_block_ht; + +struct tc_indr_block_dev { + struct rhash_head ht_node; + struct net_device *dev; + unsigned int refcnt; + struct list_head cb_list; + struct tcf_block *block; +}; + +struct tc_indr_block_cb { + struct list_head list; + void *cb_priv; + tc_indr_block_bind_cb_t *cb; + void *cb_ident; +}; + +static const struct rhashtable_params tc_indr_setup_block_ht_params = { + .key_offset = offsetof(struct tc_indr_block_dev, dev), + .head_offset = offsetof(struct tc_indr_block_dev, ht_node), + .key_len = sizeof(struct net_device *), +}; + +static struct tc_indr_block_dev * +tc_indr_block_dev_lookup(struct net_device *dev) +{ + return rhashtable_lookup_fast(&indr_setup_block_ht, &dev, + tc_indr_setup_block_ht_params); +} + +static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev) +{ + struct tc_indr_block_dev *indr_dev; + + indr_dev = tc_indr_block_dev_lookup(dev); + if (indr_dev) + goto inc_ref; + + indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL); + if (!indr_dev) + return NULL; + + INIT_LIST_HEAD(&indr_dev->cb_list); + indr_dev->dev = dev; + indr_dev->block = tc_dev_ingress_block(dev); + if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node, + tc_indr_setup_block_ht_params)) { + kfree(indr_dev); + return NULL; + } + +inc_ref: + indr_dev->refcnt++; + return indr_dev; +} + +static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev) +{ + if (--indr_dev->refcnt) + return; + + rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node, + tc_indr_setup_block_ht_params); + kfree(indr_dev); +} + +static struct tc_indr_block_cb * +tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev, + tc_indr_block_bind_cb_t *cb, void *cb_ident) +{ + struct tc_indr_block_cb *indr_block_cb; + + list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list) + if (indr_block_cb->cb == cb && + indr_block_cb->cb_ident == cb_ident) + return indr_block_cb; + return NULL; +} + +static struct tc_indr_block_cb * +tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv, + tc_indr_block_bind_cb_t *cb, void *cb_ident) +{ + struct tc_indr_block_cb *indr_block_cb; + + indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident); + if (indr_block_cb) + return ERR_PTR(-EEXIST); + + indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL); + if (!indr_block_cb) + return ERR_PTR(-ENOMEM); + + indr_block_cb->cb_priv = cb_priv; + indr_block_cb->cb = cb; + indr_block_cb->cb_ident = cb_ident; + list_add(&indr_block_cb->list, &indr_dev->cb_list); + + return indr_block_cb; +} + +static
void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb) +{ + list_del(&indr_block_cb->list); + kfree(indr_block_cb); +} + +static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev, + struct tc_indr_block_cb *indr_block_cb, + enum tc_block_command command) +{ + struct tc_block_offload bo = { + .command = command, + .binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS, + .block = indr_dev->block, + }; + + if (!indr_dev->block) + return; + + indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK, + &bo); +} + +int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv, + tc_indr_block_bind_cb_t *cb, void *cb_ident) +{ + struct tc_indr_block_cb *indr_block_cb; + struct tc_indr_block_dev *indr_dev; + int err; + + indr_dev = tc_indr_block_dev_get(dev); + if (!indr_dev) + return -ENOMEM; + + indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident); + err = PTR_ERR_OR_ZERO(indr_block_cb); + if (err) + goto err_dev_put; + + tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_BIND); + return 0; + +err_dev_put: + tc_indr_block_dev_put(indr_dev); + return err; +} +EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register); + +int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv, + tc_indr_block_bind_cb_t *cb, void *cb_ident) +{ + int err; + + rtnl_lock(); + err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident); + rtnl_unlock(); + + return err; +} +EXPORT_SYMBOL_GPL(tc_indr_block_cb_register); + +void __tc_indr_block_cb_unregister(struct net_device *dev, + tc_indr_block_bind_cb_t *cb, void *cb_ident) +{ + struct tc_indr_block_cb *indr_block_cb; + struct tc_indr_block_dev *indr_dev; + + indr_dev = tc_indr_block_dev_lookup(dev); + if (!indr_dev) + return; + + indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident); + if (!indr_block_cb) + return; + + /* Send unbind message if required to free any block cbs. */ + tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_UNBIND); + tc_indr_block_cb_del(indr_block_cb); + tc_indr_block_dev_put(indr_dev); +} +EXPORT_SYMBOL_GPL(__tc_indr_block_cb_unregister); + +void tc_indr_block_cb_unregister(struct net_device *dev, + tc_indr_block_bind_cb_t *cb, void *cb_ident) +{ + rtnl_lock(); + __tc_indr_block_cb_unregister(dev, cb, cb_ident); + rtnl_unlock(); +} +EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister); + +static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev, + struct tcf_block_ext_info *ei, + enum tc_block_command command, + struct netlink_ext_ack *extack) +{ + struct tc_indr_block_cb *indr_block_cb; + struct tc_indr_block_dev *indr_dev; + struct tc_block_offload bo = { + .command = command, + .binder_type = ei->binder_type, + .block = block, + .extack = extack, + }; + + indr_dev = tc_indr_block_dev_lookup(dev); + if (!indr_dev) + return; + + indr_dev->block = command == TC_BLOCK_BIND ? 
block : NULL; + + list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list) + indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK, + &bo); +} + static bool tcf_block_offload_in_use(struct tcf_block *block) { return block->offloadcnt; @@ -406,12 +646,17 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack); if (err == -EOPNOTSUPP) goto no_offload_dev_inc; - return err; + if (err) + return err; + + tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack); + return 0; no_offload_dev_inc: if (tcf_block_offload_in_use(block)) return -EOPNOTSUPP; block->nooffloaddevcnt++; + tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack); return 0; } @@ -421,6 +666,8 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q, struct net_device *dev = q->dev_queue->dev; int err; + tc_indr_block_call(block, dev, ei, TC_BLOCK_UNBIND, NULL); + if (!dev->netdev_ops->ndo_setup_tc) goto no_offload_dev_dec; err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL); @@ -2355,6 +2602,11 @@ static int __init tc_filter_init(void) if (err) goto err_register_pernet_subsys; + err = rhashtable_init(&indr_setup_block_ht, + &tc_indr_setup_block_ht_params); + if (err) + goto err_rhash_setup_block_ht; + rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 0); rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 0); rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter, @@ -2366,6 +2618,8 @@ static int __init tc_filter_init(void) return 0; +err_rhash_setup_block_ht: + unregister_pernet_subsys(&tcf_net_ops); err_register_pernet_subsys: destroy_workqueue(tc_filter_wq); return err; From 7885b4fc8d8e3d0e489f08144f3d4eb2952cb852 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Fri, 9 Nov 2018 21:21:27 -0800 Subject: [PATCH 2/6] nfp: flower: allow non repr netdev offload Previously the offload functions in NFP assumed that the ingress (or egress) netdev passed to them was an nfp repr. Modify the driver to permit the passing of non repr netdevs as the ingress device for an offload rule candidate. This may include devices such as tunnels. The driver should then base its offload decision on a combination of ingress device and egress port for a rule. Signed-off-by: John Hurley Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/flower/action.c | 14 ++++---- .../net/ethernet/netronome/nfp/flower/main.h | 3 +- .../net/ethernet/netronome/nfp/flower/match.c | 36 ++++++++++--------- .../ethernet/netronome/nfp/flower/offload.c | 33 ++++++++++------- 4 files changed, 48 insertions(+), 38 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index fbc052d5bb47..2e64fe878da6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -149,11 +149,12 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, /* Set action output parameters. */ output->flags = cpu_to_be16(tmp_flags); - /* Only offload if egress ports are on the same device as the - * ingress port. - */ - if (!switchdev_port_same_parent_id(in_dev, out_dev)) - return -EOPNOTSUPP; + if (nfp_netdev_is_nfp_repr(in_dev)) { + /* Confirm ingress and egress are on same device. 
*/ + if (!switchdev_port_same_parent_id(in_dev, out_dev)) + return -EOPNOTSUPP; + } + if (!nfp_netdev_is_nfp_repr(out_dev)) return -EOPNOTSUPP; @@ -840,9 +841,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, *a_len += sizeof(struct nfp_fl_push_vlan); } else if (is_tcf_tunnel_set(a)) { struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a); - struct nfp_repr *repr = netdev_priv(netdev); - *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a); + *tun_type = nfp_fl_get_tun_from_act_l4_port(app, a); if (*tun_type == NFP_FL_TUNNEL_NONE) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 0f6f1675f6f1..4a2b1a915131 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -222,7 +222,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app); int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, enum tc_setup_type type, void *type_data); -int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, +int nfp_flower_compile_flow_match(struct nfp_app *app, + struct tc_cls_flower_offload *flow, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index e54fb6034326..cdf75595f627 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -52,10 +52,13 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port, return 0; } - if (tun_type) + if (tun_type) { frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type); - else + } else { + if (!cmsg_port) + return -EOPNOTSUPP; frame->in_port = cpu_to_be32(cmsg_port); + } return 0; } @@ -289,17 +292,21 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame, } } -int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, +int nfp_flower_compile_flow_match(struct nfp_app *app, + struct tc_cls_flower_offload *flow, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, enum nfp_flower_tun_type tun_type) { - struct nfp_repr *netdev_repr; + u32 cmsg_port = 0; int err; u8 *ext; u8 *msk; + if (nfp_netdev_is_nfp_repr(netdev)) + cmsg_port = nfp_repr_get_port_id(netdev); + memset(nfp_flow->unmasked_data, 0, key_ls->key_size); memset(nfp_flow->mask_data, 0, key_ls->key_size); @@ -327,15 +334,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, /* Populate Exact Port data. */ err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext, - nfp_repr_get_port_id(netdev), - false, tun_type); + cmsg_port, false, tun_type); if (err) return err; /* Populate Mask Port Data. */ err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk, - nfp_repr_get_port_id(netdev), - true, tun_type); + cmsg_port, true, tun_type); if (err) return err; @@ -399,16 +404,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, msk += sizeof(struct nfp_flower_ipv4_udp_tun); /* Configure tunnel end point MAC. */ - if (nfp_netdev_is_nfp_repr(netdev)) { - netdev_repr = netdev_priv(netdev); - nfp_tunnel_write_macs(netdev_repr->app); + nfp_tunnel_write_macs(app); - /* Store the tunnel destination in the rule data. - * This must be present and be an exact match. 
- */ - nfp_flow->nfp_tun_ipv4_addr = tun_dst; - nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst); - } + /* Store the tunnel destination in the rule data. + * This must be present and be an exact match. + */ + nfp_flow->nfp_tun_ipv4_addr = tun_dst; + nfp_tunnel_add_ipv4_off(app, tun_dst); if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) { err = nfp_flower_compile_geneve_opt(ext, flow, false); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 29c95423ab64..2c32edfc1a9d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -56,11 +56,10 @@ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)) static int -nfp_flower_xmit_flow(struct net_device *netdev, - struct nfp_fl_payload *nfp_flow, u8 mtype) +nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow, + u8 mtype) { u32 meta_len, key_len, mask_len, act_len, tot_len; - struct nfp_repr *priv = netdev_priv(netdev); struct sk_buff *skb; unsigned char *msg; @@ -78,7 +77,7 @@ nfp_flower_xmit_flow(struct net_device *netdev, nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ; nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ; - skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL); + skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL); if (!skb) return -ENOMEM; @@ -96,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev, nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ; nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ; - nfp_ctrl_tx(priv->app->ctrl, skb); + nfp_ctrl_tx(app->ctrl, skb); return 0; } @@ -427,13 +426,16 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, struct tc_cls_flower_offload *flow, bool egress) { enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE; - struct nfp_port *port = nfp_port_from_netdev(netdev); struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *flow_pay; struct nfp_fl_key_ls *key_layer; + struct nfp_port *port = NULL; struct net_device *ingr_dev; int err; + if (nfp_netdev_is_nfp_repr(netdev)) + port = nfp_port_from_netdev(netdev); + ingr_dev = egress ? NULL : netdev; flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, NFP_FL_STATS_CTX_DONT_CARE); @@ -462,8 +464,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, flow_pay->ingress_dev = egress ? NULL : netdev; - err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay, - tun_type); + err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev, + flow_pay, tun_type); if (err) goto err_destroy_flow; @@ -476,7 +478,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_destroy_flow; - err = nfp_flower_xmit_flow(netdev, flow_pay, + err = nfp_flower_xmit_flow(app, flow_pay, NFP_FLOWER_CMSG_TYPE_FLOW_ADD); if (err) goto err_destroy_flow; @@ -487,7 +489,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_destroy_flow; - port->tc_offload_cnt++; + if (port) + port->tc_offload_cnt++; /* Deallocate flow payload when flower rule has been destroyed. 
*/ kfree(key_layer); @@ -520,12 +523,15 @@ static int nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, struct tc_cls_flower_offload *flow, bool egress) { - struct nfp_port *port = nfp_port_from_netdev(netdev); struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *nfp_flow; + struct nfp_port *port = NULL; struct net_device *ingr_dev; int err; + if (nfp_netdev_is_nfp_repr(netdev)) + port = nfp_port_from_netdev(netdev); + ingr_dev = egress ? NULL : netdev; nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, NFP_FL_STATS_CTX_DONT_CARE); @@ -539,13 +545,14 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, if (nfp_flow->nfp_tun_ipv4_addr) nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr); - err = nfp_flower_xmit_flow(netdev, nfp_flow, + err = nfp_flower_xmit_flow(app, nfp_flow, NFP_FLOWER_CMSG_TYPE_FLOW_DEL); if (err) goto err_free_flow; err_free_flow: - port->tc_offload_cnt--; + if (port) + port->tc_offload_cnt--; kfree(nfp_flow->action_data); kfree(nfp_flow->mask_data); kfree(nfp_flow->unmasked_data); From 65b7970edf870c8b52fb2f3e847a9a4f0720a019 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Fri, 9 Nov 2018 21:21:28 -0800 Subject: [PATCH 3/6] nfp: flower: increase scope of netdev checking functions Both the actions and tunnel_conf files contain local functions that check the type of an input netdev. In preparation for re-use with tunnel offload via indirect blocks, move these to static inline functions in a header file. Signed-off-by: John Hurley Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/flower/action.c | 14 ---------- .../net/ethernet/netronome/nfp/flower/cmsg.h | 27 +++++++++++++++++++ .../netronome/nfp/flower/tunnel_conf.c | 19 ++----------- 3 files changed, 29 insertions(+), 31 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 2e64fe878da6..8d54b36afee8 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -2,7 +2,6 @@ /* Copyright (C) 2017-2018 Netronome Systems, Inc. 
*/ #include -#include #include #include #include @@ -11,7 +10,6 @@ #include #include #include -#include #include "cmsg.h" #include "main.h" @@ -92,18 +90,6 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action, return act_size; } -static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev, - enum nfp_flower_tun_type tun_type) -{ - if (netif_is_vxlan(out_dev)) - return tun_type == NFP_FL_TUNNEL_VXLAN; - - if (netif_is_geneve(out_dev)) - return tun_type == NFP_FL_TUNNEL_GENEVE; - - return false; -} - static int nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, const struct tc_action *action, struct nfp_fl_payload *nfp_flow, diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 3e391555e191..15f41cfef9f1 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -8,6 +8,7 @@ #include #include #include +#include #include "../nfp_app.h" #include "../nfpcore/nfp_cpp.h" @@ -499,6 +500,32 @@ static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb) return skb->len - NFP_FLOWER_CMSG_HLEN; } +static inline bool +nfp_fl_netdev_is_tunnel_type(struct net_device *netdev, + enum nfp_flower_tun_type tun_type) +{ + if (netif_is_vxlan(netdev)) + return tun_type == NFP_FL_TUNNEL_VXLAN; + if (netif_is_geneve(netdev)) + return tun_type == NFP_FL_TUNNEL_GENEVE; + + return false; +} + +static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev) +{ + if (!netdev->rtnl_link_ops) + return false; + if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch")) + return true; + if (netif_is_vxlan(netdev)) + return true; + if (netif_is_geneve(netdev)) + return true; + + return false; +} + struct sk_buff * nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports); void diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 5d641d7dabff..2d9f26a725c2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -4,7 +4,6 @@ #include #include #include -#include #include #include #include @@ -182,20 +181,6 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb) } } -static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev) -{ - if (!netdev->rtnl_link_ops) - return false; - if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch")) - return true; - if (netif_is_vxlan(netdev)) - return true; - if (netif_is_geneve(netdev)) - return true; - - return false; -} - static int nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata, gfp_t flag) @@ -617,7 +602,7 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev, if (nfp_netdev_is_nfp_repr(netdev)) port = nfp_repr_get_port_id(netdev); - else if (!nfp_tun_is_netdev_to_offload(netdev)) + else if (!nfp_fl_is_netdev_to_offload(netdev)) return; entry = kmalloc(sizeof(*entry), GFP_KERNEL); @@ -660,7 +645,7 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app, { if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) { /* If non-nfp netdev then free its offload index. 
*/ - if (nfp_tun_is_netdev_to_offload(netdev)) + if (nfp_fl_is_netdev_to_offload(netdev)) nfp_tun_del_mac_idx(app, netdev->ifindex); } else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER) {
From 3166dd07a9cba5447a9d7507a988e2eb37a04f95 Mon Sep 17 00:00:00 2001
From: John Hurley
Date: Fri, 9 Nov 2018 21:21:29 -0800
Subject: [PATCH 4/6] nfp: flower: offload tunnel decap rules via indirect TC blocks

Previously, TC block tunnel decap rules were only offloaded when a callback was triggered through registration of the rule's egress device. This meant that the driver had no access to the ingress netdev and so could not verify it was the same tunnel type that the rule implied.

Register tunnel devices for indirect TC block offloads in NFP, giving access to new rules based on the ingress device rather than egress. Use this to verify the netdev type of VXLAN- and Geneve-based rules and offload the rules to HW if applicable.

Tunnel registration is done via a netdev notifier. On notifier registration, this is triggered for already existing netdevs. This means that NFP can register for offloads from devices that exist before it is loaded (filter rules will be replayed from the TC core). Similarly, on notifier unregister, a call is triggered for each currently active netdev. This allows the driver to unregister any indirect block callbacks that may still be active.

Signed-off-by: John Hurley
Reviewed-by: Jakub Kicinski
Signed-off-by: David S. Miller
--- .../net/ethernet/netronome/nfp/flower/main.c | 6 + .../net/ethernet/netronome/nfp/flower/main.h | 5 + .../ethernet/netronome/nfp/flower/offload.c | 137 +++++++++++++++++- 3 files changed, 144 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 2ad00773750f..d1c3c2081461 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -568,6 +568,8 @@ static int nfp_flower_init(struct nfp_app *app) goto err_cleanup_metadata; } + INIT_LIST_HEAD(&app_priv->indr_block_cb_priv); + return 0; err_cleanup_metadata: @@ -684,6 +686,10 @@ nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev, return ret; } + ret = nfp_flower_reg_indir_block_handler(app, netdev, event); + if (ret & NOTIFY_STOP_MASK) + return ret; + return nfp_tunnel_mac_event_handler(app, netdev, event, ptr); }
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 4a2b1a915131..8c84829ebd21 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -130,6 +130,7 @@ struct nfp_fl_lag { * @reify_wait_queue: wait queue for repr reify response counting * @mtu_conf: Configuration of repr MTU value * @nfp_lag: Link aggregation data block + * @indr_block_cb_priv: List of priv data passed to indirect block cbs */ struct nfp_flower_priv { struct nfp_app *app; @@ -162,6 +163,7 @@ struct nfp_flower_priv { wait_queue_head_t reify_wait_queue; struct nfp_mtu_conf mtu_conf; struct nfp_fl_lag nfp_lag; + struct list_head indr_block_cb_priv; }; /** @@ -271,5 +273,8 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app, struct nfp_fl_pre_lag *pre_act); int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master); +int nfp_flower_reg_indir_block_handler(struct nfp_app *app, + struct net_device *netdev, + unsigned long event); #endif diff --git
a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 2c32edfc1a9d..222e1a98cf16 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -128,6 +128,7 @@ nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts, static int nfp_flower_calculate_key_layers(struct nfp_app *app, + struct net_device *netdev, struct nfp_fl_key_ls *ret_key_ls, struct tc_cls_flower_offload *flow, bool egress, @@ -186,8 +187,6 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, skb_flow_dissector_target(flow->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL, flow->key); - if (!egress) - return -EOPNOTSUPP; if (mask_enc_ctl->addr_type != 0xffff || enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) @@ -250,6 +249,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, default: return -EOPNOTSUPP; } + + /* Ensure the ingress netdev matches the expected tun type. */ + if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) + return -EOPNOTSUPP; } else if (egress) { /* Reject non tunnel matches offloaded to egress repr. */ return -EOPNOTSUPP; @@ -451,8 +454,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (!key_layer) return -ENOMEM; - err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress, - &tun_type); + err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow, + egress, &tun_type); if (err) goto err_free_key_ls; @@ -693,3 +696,129 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, return -EOPNOTSUPP; } } + +struct nfp_flower_indr_block_cb_priv { + struct net_device *netdev; + struct nfp_app *app; + struct list_head list; +}; + +static struct nfp_flower_indr_block_cb_priv * +nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app, + struct net_device *netdev) +{ + struct nfp_flower_indr_block_cb_priv *cb_priv; + struct nfp_flower_priv *priv = app->priv; + + /* All callback list access should be protected by RTNL. 
*/ + ASSERT_RTNL(); + + list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list) + if (cb_priv->netdev == netdev) + return cb_priv; + + return NULL; +} + +static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct nfp_flower_indr_block_cb_priv *priv = cb_priv; + struct tc_cls_flower_offload *flower = type_data; + + if (flower->common.chain_index) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return nfp_flower_repr_offload(priv->app, priv->netdev, + type_data, false); + default: + return -EOPNOTSUPP; + } +} + +static int +nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, + struct tc_block_offload *f) +{ + struct nfp_flower_indr_block_cb_priv *cb_priv; + struct nfp_flower_priv *priv = app->priv; + int err; + + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL); + if (!cb_priv) + return -ENOMEM; + + cb_priv->netdev = netdev; + cb_priv->app = app; + list_add(&cb_priv->list, &priv->indr_block_cb_priv); + + err = tcf_block_cb_register(f->block, + nfp_flower_setup_indr_block_cb, + netdev, cb_priv, f->extack); + if (err) { + list_del(&cb_priv->list); + kfree(cb_priv); + } + + return err; + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, + nfp_flower_setup_indr_block_cb, netdev); + cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev); + if (cb_priv) { + list_del(&cb_priv->list); + kfree(cb_priv); + } + + return 0; + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int +nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, + enum tc_setup_type type, void *type_data) +{ + switch (type) { + case TC_SETUP_BLOCK: + return nfp_flower_setup_indr_tc_block(netdev, cb_priv, + type_data); + default: + return -EOPNOTSUPP; + } +} + +int nfp_flower_reg_indir_block_handler(struct nfp_app *app, + struct net_device *netdev, + unsigned long event) +{ + int err; + + if (!nfp_fl_is_netdev_to_offload(netdev)) + return NOTIFY_OK; + + if (event == NETDEV_REGISTER) { + err = __tc_indr_block_cb_register(netdev, app, + nfp_flower_indr_setup_tc_cb, + netdev); + if (err) + nfp_flower_cmsg_warn(app, + "Indirect block reg failed - %s\n", + netdev->name); + } else if (event == NETDEV_UNREGISTER) { + __tc_indr_block_cb_unregister(netdev, + nfp_flower_indr_setup_tc_cb, + netdev); + } + + return NOTIFY_OK; +} From 4f63fde3fc08045a232923cc83fb08defbd7ca4b Mon Sep 17 00:00:00 2001 From: John Hurley Date: Fri, 9 Nov 2018 21:21:30 -0800 Subject: [PATCH 5/6] nfp: flower: remove TC egdev offloads Previously, only tunnel decap rules required egdev registration for offload in NFP. These are now supported via indirect TC block callbacks. Remove the egdev code from NFP. Signed-off-by: John Hurley Reviewed-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- .../net/ethernet/netronome/nfp/flower/main.c | 12 --- .../net/ethernet/netronome/nfp/flower/main.h | 3 - .../ethernet/netronome/nfp/flower/metadata.c | 1 + .../ethernet/netronome/nfp/flower/offload.c | 79 ++++--------------- 4 files changed, 17 insertions(+), 78 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index d1c3c2081461..5059110a1768 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -146,23 +146,12 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr) return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false); } -static int -nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev) -{ - return tc_setup_cb_egdev_register(netdev, - nfp_flower_setup_tc_egress_cb, - netdev_priv(netdev)); -} - static void nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev) { struct nfp_repr *repr = netdev_priv(netdev); kfree(repr->app_priv); - - tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb, - netdev_priv(netdev)); } static void @@ -711,7 +700,6 @@ const struct nfp_app_type app_flower = { .vnic_init = nfp_flower_vnic_init, .vnic_clean = nfp_flower_vnic_clean, - .repr_init = nfp_flower_repr_netdev_init, .repr_preclean = nfp_flower_repr_netdev_preclean, .repr_clean = nfp_flower_repr_netdev_clean, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 8c84829ebd21..9d134aa871fc 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -207,7 +207,6 @@ struct nfp_fl_payload { char *unmasked_data; char *mask_data; char *action_data; - bool ingress_offload; }; extern const struct rhashtable_params nfp_flower_table_params; @@ -259,8 +258,6 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4); void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4); void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb); void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb); -int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, - void *cb_priv); void nfp_flower_lag_init(struct nfp_fl_lag *lag); void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag); int nfp_flower_lag_reset(struct nfp_fl_lag *lag); diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 48729bf171e0..9b4711ce98f0 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -287,6 +287,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app, nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt); nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie); + nfp_flow->ingress_dev = netdev; new_mask_id = 0; if (!nfp_check_mask_add(app, nfp_flow->mask_data, diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 222e1a98cf16..0e2dfbb3ef86 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -131,7 +131,6 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, struct net_device *netdev, struct nfp_fl_key_ls *ret_key_ls, struct tc_cls_flower_offload *flow, - bool egress, enum nfp_flower_tun_type *tun_type) { struct flow_dissector_key_basic *mask_basic = 
NULL; @@ -253,9 +252,6 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, /* Ensure the ingress netdev matches the expected tun type. */ if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) return -EOPNOTSUPP; - } else if (egress) { - /* Reject non tunnel matches offloaded to egress repr. */ - return -EOPNOTSUPP; } if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { @@ -376,7 +372,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, } static struct nfp_fl_payload * -nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress) +nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer) { struct nfp_fl_payload *flow_pay; @@ -400,7 +396,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress) flow_pay->nfp_tun_ipv4_addr = 0; flow_pay->meta.flags = 0; - flow_pay->ingress_offload = !egress; return flow_pay; @@ -418,7 +413,6 @@ err_free_flow: * @app: Pointer to the APP handle * @netdev: netdev structure. * @flow: TC flower classifier offload structure. - * @egress: NFP netdev is the egress. * * Adds a new flow to the repeated hash structure and action payload. * @@ -426,47 +420,33 @@ err_free_flow: */ static int nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, - struct tc_cls_flower_offload *flow, bool egress) + struct tc_cls_flower_offload *flow) { enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE; struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *flow_pay; struct nfp_fl_key_ls *key_layer; struct nfp_port *port = NULL; - struct net_device *ingr_dev; int err; if (nfp_netdev_is_nfp_repr(netdev)) port = nfp_port_from_netdev(netdev); - ingr_dev = egress ? NULL : netdev; - flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, - NFP_FL_STATS_CTX_DONT_CARE); - if (flow_pay) { - /* Ignore as duplicate if it has been added by different cb. */ - if (flow_pay->ingress_offload && egress) - return 0; - else - return -EOPNOTSUPP; - } - key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL); if (!key_layer) return -ENOMEM; err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow, - egress, &tun_type); + &tun_type); if (err) goto err_free_key_ls; - flow_pay = nfp_flower_allocate_new(key_layer, egress); + flow_pay = nfp_flower_allocate_new(key_layer); if (!flow_pay) { err = -ENOMEM; goto err_free_key_ls; } - flow_pay->ingress_dev = egress ? NULL : netdev; - err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev, flow_pay, tun_type); if (err) @@ -476,8 +456,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_destroy_flow; - err = nfp_compile_flow_metadata(app, flow, flow_pay, - flow_pay->ingress_dev); + err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev); if (err) goto err_destroy_flow; @@ -515,7 +494,6 @@ err_free_key_ls: * @app: Pointer to the APP handle * @netdev: netdev structure. * @flow: TC flower classifier offload structure - * @egress: Netdev is the egress dev. * * Removes a flow from the repeated hash structure and clears the * action payload. @@ -524,22 +502,20 @@ err_free_key_ls: */ static int nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, - struct tc_cls_flower_offload *flow, bool egress) + struct tc_cls_flower_offload *flow) { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *nfp_flow; struct nfp_port *port = NULL; - struct net_device *ingr_dev; int err; if (nfp_netdev_is_nfp_repr(netdev)) port = nfp_port_from_netdev(netdev); - ingr_dev = egress ? 
NULL : netdev; - nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, + nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev, NFP_FL_STATS_CTX_DONT_CARE); if (!nfp_flow) - return egress ? 0 : -ENOENT; + return -ENOENT; err = nfp_modify_flow_metadata(app, nfp_flow); if (err) @@ -571,7 +547,6 @@ err_free_flow: * @app: Pointer to the APP handle * @netdev: Netdev structure. * @flow: TC flower classifier offload structure - * @egress: Netdev is the egress dev. * * Populates a flow statistics structure which which corresponds to a * specific flow. @@ -580,22 +555,17 @@ err_free_flow: */ static int nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, - struct tc_cls_flower_offload *flow, bool egress) + struct tc_cls_flower_offload *flow) { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *nfp_flow; - struct net_device *ingr_dev; u32 ctx_id; - ingr_dev = egress ? NULL : netdev; - nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, + nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev, NFP_FL_STATS_CTX_DONT_CARE); if (!nfp_flow) return -EINVAL; - if (nfp_flow->ingress_offload && egress) - return 0; - ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id); spin_lock_bh(&priv->stats_lock); @@ -612,35 +582,18 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, static int nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, - struct tc_cls_flower_offload *flower, bool egress) + struct tc_cls_flower_offload *flower) { if (!eth_proto_is_802_3(flower->common.protocol)) return -EOPNOTSUPP; switch (flower->command) { case TC_CLSFLOWER_REPLACE: - return nfp_flower_add_offload(app, netdev, flower, egress); + return nfp_flower_add_offload(app, netdev, flower); case TC_CLSFLOWER_DESTROY: - return nfp_flower_del_offload(app, netdev, flower, egress); + return nfp_flower_del_offload(app, netdev, flower); case TC_CLSFLOWER_STATS: - return nfp_flower_get_stats(app, netdev, flower, egress); - default: - return -EOPNOTSUPP; - } -} - -int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) -{ - struct nfp_repr *repr = cb_priv; - - if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data)) - return -EOPNOTSUPP; - - switch (type) { - case TC_SETUP_CLSFLOWER: - return nfp_flower_repr_offload(repr->app, repr->netdev, - type_data, true); + return nfp_flower_get_stats(app, netdev, flower); default: return -EOPNOTSUPP; } @@ -657,7 +610,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, switch (type) { case TC_SETUP_CLSFLOWER: return nfp_flower_repr_offload(repr->app, repr->netdev, - type_data, false); + type_data); default: return -EOPNOTSUPP; } @@ -732,7 +685,7 @@ static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, switch (type) { case TC_SETUP_CLSFLOWER: return nfp_flower_repr_offload(priv->app, priv->netdev, - type_data, false); + type_data); default: return -EOPNOTSUPP; } From d4b69bad61e0fc4055c108fa28846ae981948d54 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Fri, 9 Nov 2018 21:21:31 -0800 Subject: [PATCH 6/6] nfp: flower: remove unnecessary code in flow lookup Recent changes to NFP mean that stats updates from fw to driver no longer require a flow lookup and (because egdev offload has been removed) the ingress netdev for a lookup is now always known. Remove obsolete code in a flow lookup that matches on host context and that allows for a netdev to be NULL. 
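For reference (a reviewer's sketch of the resulting semantics, not an additional change): rhashtable compare callbacks use memcmp()-style return values, 0 for a match and non-zero otherwise, so after this patch a table entry matches exactly when both the ingress netdev and the TC flower cookie match. The simplified compare function reads approximately:

    /* Match iff the ingress netdev and the flower cookie both match;
     * returns 0 on match, as rhashtable_lookup_fast() expects.
     */
    static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
                                const void *obj)
    {
            const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
            const struct nfp_fl_payload *flow_entry = obj;

            if (flow_entry->ingress_dev == cmp_arg->netdev)
                    return flow_entry->tc_flower_cookie != cmp_arg->cookie;

            return 1;
    }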
Signed-off-by: John Hurley Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/main.h | 3 +-- drivers/net/ethernet/netronome/nfp/flower/metadata.c | 11 +++-------- drivers/net/ethernet/netronome/nfp/flower/offload.c | 6 ++---- 3 files changed, 6 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 9d134aa871fc..b858bac47621 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -20,7 +20,6 @@ struct nfp_fl_pre_lag; struct net_device; struct nfp_app; -#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff) #define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \ init_unalloc) #define NFP_FLOWER_MASK_ENTRY_RS 256 @@ -242,7 +241,7 @@ int nfp_modify_flow_metadata(struct nfp_app *app, struct nfp_fl_payload * nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie, - struct net_device *netdev, __be32 host_ctx); + struct net_device *netdev); struct nfp_fl_payload * nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie); diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 9b4711ce98f0..573a4400a26c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -21,7 +21,6 @@ struct nfp_mask_id_table { struct nfp_fl_flow_table_cmp_arg { struct net_device *netdev; unsigned long cookie; - __be32 host_ctx; }; static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id) @@ -76,14 +75,13 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) /* Must be called with either RTNL or rcu_read_lock */ struct nfp_fl_payload * nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie, - struct net_device *netdev, __be32 host_ctx) + struct net_device *netdev) { struct nfp_fl_flow_table_cmp_arg flower_cmp_arg; struct nfp_flower_priv *priv = app->priv; flower_cmp_arg.netdev = netdev; flower_cmp_arg.cookie = tc_flower_cookie; - flower_cmp_arg.host_ctx = host_ctx; return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg, nfp_flower_table_params); @@ -307,8 +305,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app, priv->stats[stats_cxt].bytes = 0; priv->stats[stats_cxt].used = jiffies; - check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev, - NFP_FL_STATS_CTX_DONT_CARE); + check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev); if (check_entry) { if (nfp_release_stats_entry(app, stats_cxt)) return -EINVAL; @@ -353,9 +350,7 @@ static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg, const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key; const struct nfp_fl_payload *flow_entry = obj; - if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) && - (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE || - flow_entry->meta.host_ctx_id == cmp_arg->host_ctx)) + if (flow_entry->ingress_dev == cmp_arg->netdev) return flow_entry->tc_flower_cookie != cmp_arg->cookie; return 1; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 0e2dfbb3ef86..545d94168874 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -512,8 +512,7 @@ nfp_flower_del_offload(struct 
nfp_app *app, struct net_device *netdev, if (nfp_netdev_is_nfp_repr(netdev)) port = nfp_port_from_netdev(netdev); - nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev, - NFP_FL_STATS_CTX_DONT_CARE); + nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev); if (!nfp_flow) return -ENOENT; @@ -561,8 +560,7 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, struct nfp_fl_payload *nfp_flow; u32 ctx_id; - nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev, - NFP_FL_STATS_CTX_DONT_CARE); + nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev); if (!nfp_flow) return -EINVAL;
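For completeness across the series: the indirect bind callback a driver registers (my_setup_tc_cb() in the earlier sketch) handles TC_SETUP_BLOCK in essentially the same way as a direct setup_tc block handler. A minimal sketch modelled on nfp_flower_setup_indr_tc_block() from patch 4, with my_block_cb standing in for the driver's per-rule callback:

    static int my_setup_tc_cb(struct net_device *netdev, void *cb_priv,
                              enum tc_setup_type type, void *type_data)
    {
            struct tc_block_offload *f = type_data;

            if (type != TC_SETUP_BLOCK)
                    return -EOPNOTSUPP;

            if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                    return -EOPNOTSUPP;

            switch (f->command) {
            case TC_BLOCK_BIND:
                    /* Attach a per-block rule callback; filters already
                     * installed on the block are replayed from the TC
                     * core, as noted in the patch 4 commit message.
                     */
                    return tcf_block_cb_register(f->block, my_block_cb,
                                                 netdev, cb_priv,
                                                 f->extack);
            case TC_BLOCK_UNBIND:
                    tcf_block_cb_unregister(f->block, my_block_cb, netdev);
                    return 0;
            default:
                    return -EOPNOTSUPP;
            }
    }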