
Merge tag 'mlx5-updates-2021-04-02' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-04-02

This series provides trivial updates and cleanups to the mlx5 driver:

1) Support for matching on the ct_state inv and rel flags in connection tracking
2) Reject TC rules that redirect from a VF to itself
3) Parav provided some E-Switch cleanups that can be summarized as:
  3.1) Pack and reduce structure sizes
  3.2) Dynamic allocation of the rate limit tables and structures
4) Vu makes the netdev aRFS and VLAN table allocation dynamic.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2021-04-04 01:41:08 -07:00, commit cd77ce9303.
14 changed files with 323 additions and 198 deletions.


@@ -49,18 +49,10 @@ struct mlx5e_promisc_table {
 	struct mlx5_flow_handle	*rule;
 };
 
-struct mlx5e_vlan_table {
-	struct mlx5e_flow_table		ft;
-	DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
-	DECLARE_BITMAP(active_svlans, VLAN_N_VID);
-	struct mlx5_flow_handle	*active_cvlans_rule[VLAN_N_VID];
-	struct mlx5_flow_handle	*active_svlans_rule[VLAN_N_VID];
-	struct mlx5_flow_handle	*untagged_rule;
-	struct mlx5_flow_handle	*any_cvlan_rule;
-	struct mlx5_flow_handle	*any_svlan_rule;
-	struct mlx5_flow_handle	*trap_rule;
-	bool			cvlan_filter_disabled;
-};
+/* Forward declaration and APIs to get private fields of vlan_table */
+struct mlx5e_vlan_table;
+unsigned long *mlx5e_vlan_get_active_svlans(struct mlx5e_vlan_table *vlan);
+struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan);
 
 struct mlx5e_l2_table {
 	struct mlx5e_flow_table    ft;
@@ -200,31 +192,7 @@ static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
 #endif /* CONFIG_MLX5_EN_RXNFC */
 
 #ifdef CONFIG_MLX5_EN_ARFS
-#define ARFS_HASH_SHIFT BITS_PER_BYTE
-#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
-
-struct arfs_table {
-	struct mlx5e_flow_table  ft;
-	struct mlx5_flow_handle	 *default_rule;
-	struct hlist_head	 rules_hash[ARFS_HASH_SIZE];
-};
-
-enum arfs_type {
-	ARFS_IPV4_TCP,
-	ARFS_IPV6_TCP,
-	ARFS_IPV4_UDP,
-	ARFS_IPV6_UDP,
-	ARFS_NUM_TYPES,
-};
-
-struct mlx5e_arfs_tables {
-	struct arfs_table arfs_tables[ARFS_NUM_TYPES];
-	/* Protect aRFS rules list */
-	spinlock_t		arfs_lock;
-	struct list_head	rules;
-	int			last_filter_id;
-	struct workqueue_struct	*wq;
-};
+struct mlx5e_arfs_tables;
 
 int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
 void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
@@ -255,12 +223,12 @@ struct mlx5e_flow_steering {
 #endif
 	struct mlx5e_tc_table           tc;
 	struct mlx5e_promisc_table      promisc;
-	struct mlx5e_vlan_table         vlan;
+	struct mlx5e_vlan_table         *vlan;
 	struct mlx5e_l2_table           l2;
 	struct mlx5e_ttc_table          ttc;
 	struct mlx5e_ttc_table          inner_ttc;
 #ifdef CONFIG_MLX5_EN_ARFS
-	struct mlx5e_arfs_tables        arfs;
+	struct mlx5e_arfs_tables        *arfs;
 #endif
 #ifdef CONFIG_MLX5_EN_TLS
 	struct mlx5e_accel_fs_tcp      *accel_tcp;
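The hunks above turn mlx5e_vlan_table and mlx5e_arfs_tables into opaque types: the header keeps only a forward declaration plus accessor prototypes, and the full definition moves into a single .c file. A minimal standalone sketch of that pattern, with hypothetical names (widget, widget_get_value) that are not part of the driver:

#include <stdlib.h>

/* Header side (sketch): users see only an opaque type and accessors. */
struct widget;                          /* forward declaration only */
struct widget *widget_create(int value);
int widget_get_value(struct widget *w);

/* Implementation side (sketch): the layout is private to this file, so it
 * can change, or be allocated dynamically, without touching any user. */
struct widget {
	int value;
};

struct widget *widget_create(int value)
{
	struct widget *w = calloc(1, sizeof(*w));

	if (w)
		w->value = value;
	return w;
}

int widget_get_value(struct widget *w)
{
	return w->value;
}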


@@ -29,6 +29,8 @@
 #define MLX5_CT_STATE_TRK_BIT BIT(2)
 #define MLX5_CT_STATE_NAT_BIT BIT(3)
 #define MLX5_CT_STATE_REPLY_BIT BIT(4)
+#define MLX5_CT_STATE_RELATED_BIT BIT(5)
+#define MLX5_CT_STATE_INVALID_BIT BIT(6)
 
 #define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8)
 #define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
@@ -1207,8 +1209,8 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
 		     struct mlx5_ct_attr *ct_attr,
 		     struct netlink_ext_ack *extack)
 {
+	bool trk, est, untrk, unest, new, rpl, unrpl, rel, unrel, inv, uninv;
 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
-	bool trk, est, untrk, unest, new, rpl, unrpl;
 	struct flow_dissector_key_ct *mask, *key;
 	u32 ctstate = 0, ctstate_mask = 0;
 	u16 ct_state_on, ct_state_off;
@@ -1236,7 +1238,9 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
 	if (ct_state_mask & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 			      TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
 			      TCA_FLOWER_KEY_CT_FLAGS_NEW |
-			      TCA_FLOWER_KEY_CT_FLAGS_REPLY)) {
+			      TCA_FLOWER_KEY_CT_FLAGS_REPLY |
+			      TCA_FLOWER_KEY_CT_FLAGS_RELATED |
+			      TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "only ct_state trk, est, new and rpl are supported for offload");
 		return -EOPNOTSUPP;
@@ -1248,9 +1252,13 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
 	new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW;
 	est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
 	rpl = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
+	rel = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
+	inv = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
 	untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
 	unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
 	unrpl = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
+	unrel = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
+	uninv = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
 
 	ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0;
 	ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
@@ -1258,6 +1266,20 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
 	ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0;
 	ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
 	ctstate_mask |= (unrpl || rpl) ? MLX5_CT_STATE_REPLY_BIT : 0;
+	ctstate_mask |= unrel ? MLX5_CT_STATE_RELATED_BIT : 0;
+	ctstate_mask |= uninv ? MLX5_CT_STATE_INVALID_BIT : 0;
+
+	if (rel) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "matching on ct_state +rel isn't supported");
+		return -EOPNOTSUPP;
+	}
+
+	if (inv) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "matching on ct_state +inv isn't supported");
+		return -EOPNOTSUPP;
+	}
 
 	if (new) {
 		NL_SET_ERR_MSG_MOD(extack,
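For reference, the ct_state_on/ct_state_off split used above comes from flower's key/mask pair: a flag is matched "on" when set in both key and mask, and "off" when set in the mask but clear in the key. A standalone sketch of that decomposition (the TCA_* values are copied from include/uapi/linux/pkt_cls.h; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Values from include/uapi/linux/pkt_cls.h */
#define TCA_FLOWER_KEY_CT_FLAGS_NEW         (1 << 0)
#define TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED (1 << 1)
#define TCA_FLOWER_KEY_CT_FLAGS_RELATED     (1 << 2)
#define TCA_FLOWER_KEY_CT_FLAGS_TRACKED     (1 << 3)
#define TCA_FLOWER_KEY_CT_FLAGS_INVALID     (1 << 4)
#define TCA_FLOWER_KEY_CT_FLAGS_REPLY       (1 << 5)

int main(void)
{
	/* e.g. "ct_state +trk+est-rel": key has trk|est, mask has trk|est|rel */
	uint16_t key  = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
			TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
	uint16_t mask = key | TCA_FLOWER_KEY_CT_FLAGS_RELATED;

	uint16_t ct_state_on  = key & mask;	/* flags that must be set */
	uint16_t ct_state_off = mask & ~key;	/* flags that must be clear */

	/* +rel/+inv would land in ct_state_on and be rejected by the driver;
	 * -rel/-inv only add the RELATED/INVALID bits to the match mask. */
	printf("on=0x%x off=0x%x\n", ct_state_on, ct_state_off);
	return 0;
}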


@@ -36,6 +36,32 @@
 #include <linux/ipv6.h>
 #include "en.h"
 
+#define ARFS_HASH_SHIFT BITS_PER_BYTE
+#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
+
+struct arfs_table {
+	struct mlx5e_flow_table  ft;
+	struct mlx5_flow_handle	 *default_rule;
+	struct hlist_head	 rules_hash[ARFS_HASH_SIZE];
+};
+
+enum arfs_type {
+	ARFS_IPV4_TCP,
+	ARFS_IPV6_TCP,
+	ARFS_IPV4_UDP,
+	ARFS_IPV6_UDP,
+	ARFS_NUM_TYPES,
+};
+
+struct mlx5e_arfs_tables {
+	struct arfs_table arfs_tables[ARFS_NUM_TYPES];
+	/* Protect aRFS rules list */
+	spinlock_t		arfs_lock;
+	struct list_head	rules;
+	int			last_filter_id;
+	struct workqueue_struct	*wq;
+};
+
 struct arfs_tuple {
 	__be16 etype;
 	u8     ip_proto;
@@ -121,7 +147,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	for (i = 0; i < ARFS_NUM_TYPES; i++) {
-		dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
+		dest.ft = priv->fs.arfs->arfs_tables[i].ft.t;
 		/* Modify ttc rules destination to point on the aRFS FTs */
 		err = mlx5e_ttc_fwd_dest(priv, arfs_get_tt(i), &dest);
 		if (err) {
@@ -141,25 +167,31 @@ static void arfs_destroy_table(struct arfs_table *arfs_t)
 	mlx5e_destroy_flow_table(&arfs_t->ft);
 }
 
-void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
+static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv)
 {
 	int i;
 
+	arfs_del_rules(priv);
+	destroy_workqueue(priv->fs.arfs->wq);
+	for (i = 0; i < ARFS_NUM_TYPES; i++) {
+		if (!IS_ERR_OR_NULL(priv->fs.arfs->arfs_tables[i].ft.t))
+			arfs_destroy_table(&priv->fs.arfs->arfs_tables[i]);
+	}
+}
+
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
+{
 	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
 		return;
 
-	arfs_del_rules(priv);
-	destroy_workqueue(priv->fs.arfs.wq);
-	for (i = 0; i < ARFS_NUM_TYPES; i++) {
-		if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
-			arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
-	}
+	_mlx5e_cleanup_tables(priv);
+	kvfree(priv->fs.arfs);
 }
 
 static int arfs_add_default_rule(struct mlx5e_priv *priv,
 				 enum arfs_type type)
 {
-	struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
+	struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type];
 	struct mlx5e_tir *tir = priv->indir_tir;
 	struct mlx5_flow_destination dest = {};
 	MLX5_DECLARE_FLOW_ACT(flow_act);
@@ -290,7 +322,7 @@ out:
 static int arfs_create_table(struct mlx5e_priv *priv,
 			     enum arfs_type type)
 {
-	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+	struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
 	struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
 	struct mlx5_flow_table_attr ft_attr = {};
 	int err;
@@ -330,20 +362,27 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
 	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
 		return 0;
 
-	spin_lock_init(&priv->fs.arfs.arfs_lock);
-	INIT_LIST_HEAD(&priv->fs.arfs.rules);
-	priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
-	if (!priv->fs.arfs.wq)
+	priv->fs.arfs = kvzalloc(sizeof(*priv->fs.arfs), GFP_KERNEL);
+	if (!priv->fs.arfs)
 		return -ENOMEM;
 
+	spin_lock_init(&priv->fs.arfs->arfs_lock);
+	INIT_LIST_HEAD(&priv->fs.arfs->rules);
+	priv->fs.arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
+	if (!priv->fs.arfs->wq)
+		goto err;
+
 	for (i = 0; i < ARFS_NUM_TYPES; i++) {
 		err = arfs_create_table(priv, i);
 		if (err)
-			goto err;
+			goto err_des;
 	}
 	return 0;
 
+err_des:
+	_mlx5e_cleanup_tables(priv);
 err:
-	mlx5e_arfs_destroy_tables(priv);
+	kvfree(priv->fs.arfs);
 	return err;
 }
@@ -358,8 +397,8 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
 	int j;
 
 	HLIST_HEAD(del_list);
-	spin_lock_bh(&priv->fs.arfs.arfs_lock);
-	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+	spin_lock_bh(&priv->fs.arfs->arfs_lock);
+	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
 		if (!work_pending(&arfs_rule->arfs_work) &&
 		    rps_may_expire_flow(priv->netdev,
 					arfs_rule->rxq, arfs_rule->flow_id,
@@ -370,7 +409,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
 			break;
 		}
 	}
-	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+	spin_unlock_bh(&priv->fs.arfs->arfs_lock);
 	hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
 		if (arfs_rule->rule)
 			mlx5_del_flow_rules(arfs_rule->rule);
@@ -387,12 +426,12 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
 	int j;
 
 	HLIST_HEAD(del_list);
-	spin_lock_bh(&priv->fs.arfs.arfs_lock);
-	mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+	spin_lock_bh(&priv->fs.arfs->arfs_lock);
+	mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
 		hlist_del_init(&rule->hlist);
 		hlist_add_head(&rule->hlist, &del_list);
 	}
-	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+	spin_unlock_bh(&priv->fs.arfs->arfs_lock);
 
 	hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
 		cancel_work_sync(&rule->arfs_work);
@@ -436,7 +475,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
 static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
 					      struct arfs_rule *arfs_rule)
 {
-	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+	struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
 	struct arfs_tuple *tuple = &arfs_rule->tuple;
 	struct mlx5_flow_handle *rule = NULL;
 	struct mlx5_flow_destination dest = {};
@@ -554,9 +593,9 @@ static void arfs_handle_work(struct work_struct *work)
 
 	mutex_lock(&priv->state_lock);
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-		spin_lock_bh(&priv->fs.arfs.arfs_lock);
+		spin_lock_bh(&priv->fs.arfs->arfs_lock);
 		hlist_del(&arfs_rule->hlist);
-		spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+		spin_unlock_bh(&priv->fs.arfs->arfs_lock);
 
 		mutex_unlock(&priv->state_lock);
 		kfree(arfs_rule);
@@ -609,7 +648,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
 	tuple->dst_port = fk->ports.dst;
 
 	rule->flow_id = flow_id;
-	rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
+	rule->filter_id = priv->fs.arfs->last_filter_id++ % RPS_NO_FILTER;
 
 	hlist_add_head(&rule->hlist,
 		       arfs_hash_bucket(arfs_t, tuple->src_port,
@@ -653,7 +692,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 			u16 rxq_index, u32 flow_id)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+	struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
 	struct arfs_table *arfs_t;
 	struct arfs_rule *arfs_rule;
 	struct flow_keys fk;
@@ -687,7 +726,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 			return -ENOMEM;
 		}
 	}
-	queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work);
+	queue_work(priv->fs.arfs->wq, &arfs_rule->arfs_work);
 	spin_unlock_bh(&arfs->arfs_lock);
 	return arfs_rule->filter_id;
 }
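A rough size estimate explains why the aRFS tables moved behind a kvzalloc()'d pointer rather than staying embedded in struct mlx5e_flow_steering: each of the four tables carries 256 hash buckets. A standalone sketch with stand-in types (hlist_head reduced to one pointer, flow-table fields elided as assumptions):

#include <stdio.h>

#define ARFS_HASH_SIZE 256	/* BIT(BITS_PER_BYTE) in the driver */
#define ARFS_NUM_TYPES 4	/* IPv4/IPv6 x TCP/UDP */

struct hlist_head_stub { void *first; };	/* one pointer per bucket */

struct arfs_table_stub {
	void *default_rule;			/* flow-table fields elided */
	struct hlist_head_stub rules_hash[ARFS_HASH_SIZE];
};

int main(void)
{
	/* roughly 8 KiB of hash buckets alone on a 64-bit build, before the
	 * spinlock, rules list, and workqueue pointer are counted */
	printf("%zu bytes\n",
	       (size_t)ARFS_NUM_TYPES * sizeof(struct arfs_table_stub));
	return 0;
}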


@@ -108,6 +108,29 @@ static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
 	kfree(hn);
 }
 
+struct mlx5e_vlan_table {
+	struct mlx5e_flow_table		ft;
+	DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
+	DECLARE_BITMAP(active_svlans, VLAN_N_VID);
+	struct mlx5_flow_handle	*active_cvlans_rule[VLAN_N_VID];
+	struct mlx5_flow_handle	*active_svlans_rule[VLAN_N_VID];
+	struct mlx5_flow_handle	*untagged_rule;
+	struct mlx5_flow_handle	*any_cvlan_rule;
+	struct mlx5_flow_handle	*any_svlan_rule;
+	struct mlx5_flow_handle	*trap_rule;
+	bool			cvlan_filter_disabled;
+};
+
+unsigned long *mlx5e_vlan_get_active_svlans(struct mlx5e_vlan_table *vlan)
+{
+	return vlan->active_svlans;
+}
+
+struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan)
+{
+	return vlan->ft.t;
+}
+
 static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
 {
 	struct net_device *ndev = priv->netdev;
@@ -119,7 +142,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
 	int i;
 
 	list_size = 0;
-	for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
+	for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID)
 		list_size++;
 
 	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
@@ -136,7 +159,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
 		return -ENOMEM;
 
 	i = 0;
-	for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+	for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
 		if (i >= list_size)
 			break;
 		vlans[i++] = vlan;
@@ -163,7 +186,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 				 enum mlx5e_vlan_rule_type rule_type,
 				 u16 vid, struct mlx5_flow_spec *spec)
 {
-	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
+	struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
 	struct mlx5_flow_destination dest = {};
 	struct mlx5_flow_handle **rule_p;
 	MLX5_DECLARE_FLOW_ACT(flow_act);
@@ -180,24 +203,24 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 		 * disabled in match value means both S & C tags
 		 * don't exist (untagged of both)
 		 */
-		rule_p = &priv->fs.vlan.untagged_rule;
+		rule_p = &priv->fs.vlan->untagged_rule;
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.cvlan_tag);
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
-		rule_p = &priv->fs.vlan.any_cvlan_rule;
+		rule_p = &priv->fs.vlan->any_cvlan_rule;
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.cvlan_tag);
 		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
-		rule_p = &priv->fs.vlan.any_svlan_rule;
+		rule_p = &priv->fs.vlan->any_svlan_rule;
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.svlan_tag);
 		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
 		break;
 	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
-		rule_p = &priv->fs.vlan.active_svlans_rule[vid];
+		rule_p = &priv->fs.vlan->active_svlans_rule[vid];
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.svlan_tag);
 		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
@@ -207,7 +230,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 			 vid);
 		break;
 	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
-		rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
+		rule_p = &priv->fs.vlan->active_cvlans_rule[vid];
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.cvlan_tag);
 		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
@@ -257,33 +280,33 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
 {
 	switch (rule_type) {
 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
-		if (priv->fs.vlan.untagged_rule) {
-			mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
-			priv->fs.vlan.untagged_rule = NULL;
+		if (priv->fs.vlan->untagged_rule) {
+			mlx5_del_flow_rules(priv->fs.vlan->untagged_rule);
+			priv->fs.vlan->untagged_rule = NULL;
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
-		if (priv->fs.vlan.any_cvlan_rule) {
-			mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
-			priv->fs.vlan.any_cvlan_rule = NULL;
+		if (priv->fs.vlan->any_cvlan_rule) {
+			mlx5_del_flow_rules(priv->fs.vlan->any_cvlan_rule);
+			priv->fs.vlan->any_cvlan_rule = NULL;
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
-		if (priv->fs.vlan.any_svlan_rule) {
-			mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
-			priv->fs.vlan.any_svlan_rule = NULL;
+		if (priv->fs.vlan->any_svlan_rule) {
+			mlx5_del_flow_rules(priv->fs.vlan->any_svlan_rule);
+			priv->fs.vlan->any_svlan_rule = NULL;
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
-		if (priv->fs.vlan.active_svlans_rule[vid]) {
-			mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
-			priv->fs.vlan.active_svlans_rule[vid] = NULL;
+		if (priv->fs.vlan->active_svlans_rule[vid]) {
+			mlx5_del_flow_rules(priv->fs.vlan->active_svlans_rule[vid]);
+			priv->fs.vlan->active_svlans_rule[vid] = NULL;
 		}
 		break;
 	case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
-		if (priv->fs.vlan.active_cvlans_rule[vid]) {
-			mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
-			priv->fs.vlan.active_cvlans_rule[vid] = NULL;
+		if (priv->fs.vlan->active_cvlans_rule[vid]) {
+			mlx5_del_flow_rules(priv->fs.vlan->active_cvlans_rule[vid]);
+			priv->fs.vlan->active_cvlans_rule[vid] = NULL;
 		}
 		mlx5e_vport_context_update_vlans(priv);
 		break;
@@ -330,27 +353,27 @@ mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
 
 int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
 {
-	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
+	struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
 	struct mlx5_flow_handle *rule;
 	int err;
 
 	rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
-		priv->fs.vlan.trap_rule = NULL;
+		priv->fs.vlan->trap_rule = NULL;
 		netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
 			   __func__, err);
 		return err;
 	}
-	priv->fs.vlan.trap_rule = rule;
+	priv->fs.vlan->trap_rule = rule;
 	return 0;
 }
 
 void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
 {
-	if (priv->fs.vlan.trap_rule) {
-		mlx5_del_flow_rules(priv->fs.vlan.trap_rule);
-		priv->fs.vlan.trap_rule = NULL;
+	if (priv->fs.vlan->trap_rule) {
+		mlx5_del_flow_rules(priv->fs.vlan->trap_rule);
+		priv->fs.vlan->trap_rule = NULL;
 	}
 }
@@ -382,10 +405,10 @@ void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
 
 void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
 {
-	if (!priv->fs.vlan.cvlan_filter_disabled)
+	if (!priv->fs.vlan->cvlan_filter_disabled)
 		return;
 
-	priv->fs.vlan.cvlan_filter_disabled = false;
+	priv->fs.vlan->cvlan_filter_disabled = false;
 	if (priv->netdev->flags & IFF_PROMISC)
 		return;
 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
@@ -393,10 +416,10 @@ void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
 
 void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
 {
-	if (priv->fs.vlan.cvlan_filter_disabled)
+	if (priv->fs.vlan->cvlan_filter_disabled)
 		return;
 
-	priv->fs.vlan.cvlan_filter_disabled = true;
+	priv->fs.vlan->cvlan_filter_disabled = true;
 	if (priv->netdev->flags & IFF_PROMISC)
 		return;
 	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
@@ -406,11 +429,11 @@ static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
 {
 	int err;
 
-	set_bit(vid, priv->fs.vlan.active_cvlans);
+	set_bit(vid, priv->fs.vlan->active_cvlans);
 
 	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
 	if (err)
-		clear_bit(vid, priv->fs.vlan.active_cvlans);
+		clear_bit(vid, priv->fs.vlan->active_cvlans);
 
 	return err;
 }
@@ -420,11 +443,11 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
 	struct net_device *netdev = priv->netdev;
 	int err;
 
-	set_bit(vid, priv->fs.vlan.active_svlans);
+	set_bit(vid, priv->fs.vlan->active_svlans);
 
 	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
 	if (err) {
-		clear_bit(vid, priv->fs.vlan.active_svlans);
+		clear_bit(vid, priv->fs.vlan->active_svlans);
 		return err;
 	}
@@ -456,10 +479,10 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
 		return 0; /* no vlan table for uplink rep */
 
 	if (be16_to_cpu(proto) == ETH_P_8021Q) {
-		clear_bit(vid, priv->fs.vlan.active_cvlans);
+		clear_bit(vid, priv->fs.vlan->active_cvlans);
 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
 	} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
-		clear_bit(vid, priv->fs.vlan.active_svlans);
+		clear_bit(vid, priv->fs.vlan->active_svlans);
 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
 		netdev_update_features(dev);
 	}
@@ -473,14 +496,14 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
 	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
 
-	for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+	for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
 		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
 	}
 
-	for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
+	for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
 		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
 
-	if (priv->fs.vlan.cvlan_filter_disabled)
+	if (priv->fs.vlan->cvlan_filter_disabled)
 		mlx5e_add_any_vid_rules(priv);
 }
@@ -490,11 +513,11 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
 
-	for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+	for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
 	}
 
-	for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
+	for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
 		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
 
 	WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
@@ -504,7 +527,7 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
 	/* must be called after DESTROY bit is set and
 	 * set_rx_mode is called and flushed
 	 */
-	if (priv->fs.vlan.cvlan_filter_disabled)
+	if (priv->fs.vlan->cvlan_filter_disabled)
 		mlx5e_del_any_vid_rules(priv);
 }
@@ -1692,10 +1715,15 @@ static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
 
 static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
 {
-	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
 	struct mlx5_flow_table_attr ft_attr = {};
+	struct mlx5e_flow_table *ft;
 	int err;
 
+	priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
+	if (!priv->fs.vlan)
+		return -ENOMEM;
+
+	ft = &priv->fs.vlan->ft;
 	ft->num_groups = 0;
 
 	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
@@ -1703,12 +1731,11 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
 	ft_attr.prio = MLX5E_NIC_PRIO;
 
 	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
 	if (IS_ERR(ft->t)) {
 		err = PTR_ERR(ft->t);
-		ft->t = NULL;
-		return err;
+		goto err_free_t;
 	}
 
 	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
 	if (!ft->g) {
 		err = -ENOMEM;
@@ -1727,7 +1754,9 @@ err_free_g:
 	kfree(ft->g);
 err_destroy_vlan_table:
 	mlx5_destroy_flow_table(ft->t);
-	ft->t = NULL;
+err_free_t:
+	kvfree(priv->fs.vlan);
+	priv->fs.vlan = NULL;
 
 	return err;
 }
@@ -1735,7 +1764,8 @@ err_destroy_vlan_table:
 static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
 {
 	mlx5e_del_vlan_rules(priv);
-	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
+	mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
+	kvfree(priv->fs.vlan);
 }
 
 int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
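The same motivation applies to the VLAN table: two VLAN_N_VID-sized rule-pointer arrays dominate it, which is why mlx5e_create_vlan_table() now kvzalloc()s the structure instead of keeping it embedded. A back-of-the-envelope sketch (pointer sizes assume a 64-bit build):

#include <stdio.h>

#define VLAN_N_VID 4096	/* as in linux/if_vlan.h */

int main(void)
{
	size_t rule_arrays = 2 * VLAN_N_VID * sizeof(void *);	/* cvlan + svlan rules */
	size_t bitmaps     = 2 * VLAN_N_VID / 8;		/* active_cvlans/svlans */

	/* about 64 KiB of rule pointers plus 1 KiB of bitmaps: too large to
	 * keep embedded in every mlx5e_flow_steering instance */
	printf("%zu + %zu bytes\n", rule_arrays, bitmaps);
	return 0;
}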


@@ -3823,7 +3823,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
 	mutex_lock(&priv->state_lock);
 	params = &priv->channels.params;
-	if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
+	if (!priv->fs.vlan ||
+	    !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs.vlan), VLAN_N_VID)) {
 		/* HW strips the outer C-tag header, this is a problem
 		 * for S-tag traffic.
 		 */


@@ -906,7 +906,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
 			if (IS_ERR(dest[dest_ix].ft))
 				return ERR_CAST(dest[dest_ix].ft);
 		} else {
-			dest[dest_ix].ft = priv->fs.vlan.ft.t;
+			dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
 		}
 		dest_ix++;
 	}
@@ -3111,6 +3111,13 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
 	return (fsystem_guid == psystem_guid);
 }
 
+static bool same_vf_reps(struct mlx5e_priv *priv,
+			 struct net_device *out_dev)
+{
+	return mlx5e_eswitch_vf_rep(priv->netdev) &&
+	       priv->netdev == out_dev;
+}
+
 static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
 				   const struct flow_action_entry *act,
 				   struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -3796,6 +3803,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 				return -EOPNOTSUPP;
 			}
 
+			if (same_vf_reps(priv, out_dev)) {
+				NL_SET_ERR_MSG_MOD(extack,
+						   "can't forward from a VF to itself");
+				return -EOPNOTSUPP;
+			}
+
 			out_priv = netdev_priv(out_dev);
 			rpriv = out_priv->ppriv;
 			esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
@@ -4740,7 +4753,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 	attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
 	attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
 	attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
-	attr.default_ft = priv->fs.vlan.ft.t;
+	attr.default_ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
 
 	tc->chains = mlx5_chains_create(dev, &attr);
 	if (IS_ERR(tc->chains)) {


@@ -1235,7 +1235,7 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 		return err;
 
 	/* Attach vport to the eswitch rate limiter */
-	esw_vport_enable_qos(esw, vport, vport->info.max_rate, vport->qos.bw_share);
+	esw_vport_enable_qos(esw, vport, vport->qos.max_rate, vport->qos.bw_share);
 
 	if (mlx5_esw_is_manager_vport(esw, vport_num))
 		return 0;
@@ -2078,8 +2078,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 	ivi->qos = evport->info.qos;
 	ivi->spoofchk = evport->info.spoofchk;
 	ivi->trusted = evport->info.trusted;
-	ivi->min_tx_rate = evport->info.min_rate;
-	ivi->max_tx_rate = evport->info.max_rate;
+	ivi->min_tx_rate = evport->qos.min_rate;
+	ivi->max_tx_rate = evport->qos.max_rate;
 	mutex_unlock(&esw->state_lock);
 
 	return 0;
@@ -2319,9 +2319,9 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
 	int i;
 
 	mlx5_esw_for_all_vports(esw, i, evport) {
-		if (!evport->enabled || evport->info.min_rate < max_guarantee)
+		if (!evport->enabled || evport->qos.min_rate < max_guarantee)
 			continue;
-		max_guarantee = evport->info.min_rate;
+		max_guarantee = evport->qos.min_rate;
 	}
 
 	if (max_guarantee)
@@ -2343,8 +2343,8 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw)
 	mlx5_esw_for_all_vports(esw, i, evport) {
 		if (!evport->enabled)
 			continue;
-		vport_min_rate = evport->info.min_rate;
-		vport_max_rate = evport->info.max_rate;
+		vport_min_rate = evport->qos.min_rate;
+		vport_max_rate = evport->qos.max_rate;
 		bw_share = 0;
 
 		if (divider)
@@ -2391,24 +2391,24 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
 
 	mutex_lock(&esw->state_lock);
 
-	if (min_rate == evport->info.min_rate)
+	if (min_rate == evport->qos.min_rate)
 		goto set_max_rate;
 
-	previous_min_rate = evport->info.min_rate;
-	evport->info.min_rate = min_rate;
+	previous_min_rate = evport->qos.min_rate;
+	evport->qos.min_rate = min_rate;
 	err = normalize_vports_min_rate(esw);
 	if (err) {
-		evport->info.min_rate = previous_min_rate;
+		evport->qos.min_rate = previous_min_rate;
 		goto unlock;
 	}
 
 set_max_rate:
-	if (max_rate == evport->info.max_rate)
+	if (max_rate == evport->qos.max_rate)
 		goto unlock;
 
 	err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share);
 	if (!err)
-		evport->info.max_rate = max_rate;
+		evport->qos.max_rate = max_rate;
 
 unlock:
 	mutex_unlock(&esw->state_lock);


@@ -118,13 +118,11 @@ struct mlx5_vport_drop_stats {
 struct mlx5_vport_info {
 	u8                      mac[ETH_ALEN];
 	u16                     vlan;
-	u8                      qos;
 	u64                     node_guid;
 	int                     link_state;
-	u32                     min_rate;
-	u32                     max_rate;
-	bool                    spoofchk;
-	bool                    trusted;
+	u8                      qos;
+	u8                      spoofchk: 1;
+	u8                      trusted: 1;
 };
 
 /* Vport context events */
@@ -154,6 +152,8 @@ struct mlx5_vport {
 		bool            enabled;
 		u32             esw_tsar_ix;
 		u32             bw_share;
+		u32             min_rate;
+		u32             max_rate;
 	} qos;
 
 	bool enabled;
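The struct mlx5_vport_info shuffle above is a packing exercise: min_rate/max_rate move next to the other QoS fields in struct mlx5_vport, and the two bools become single-bit u8 bitfields grouped with qos. A hedged standalone sketch of the effect (field types mirrored with stdint stand-ins; exact padding depends on the ABI):

#include <stdio.h>
#include <stdint.h>

struct vport_info_before {
	uint8_t  mac[6];
	uint16_t vlan;
	uint8_t  qos;		/* 7 bytes of padding follow, to align node_guid */
	uint64_t node_guid;
	int      link_state;
	uint32_t min_rate;
	uint32_t max_rate;
	_Bool    spoofchk;
	_Bool    trusted;
};

struct vport_info_after {
	uint8_t  mac[6];
	uint16_t vlan;
	uint64_t node_guid;
	int      link_state;
	uint8_t  qos;
	uint8_t  spoofchk : 1;	/* bitfields share one byte */
	uint8_t  trusted : 1;
};

int main(void)
{
	/* typically 40 -> 24 bytes on x86-64 */
	printf("%zu -> %zu\n", sizeof(struct vport_info_before),
	       sizeof(struct vport_info_after));
	return 0;
}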


@@ -707,7 +707,7 @@ static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
 	}
 
 	if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) {
-		err = ida_simple_get(&fipsec->halloc, 1, 0, GFP_KERNEL);
+		err = ida_alloc_min(&fipsec->halloc, 1, GFP_KERNEL);
 		if (err < 0) {
 			context = ERR_PTR(err);
 			goto exists;
@@ -758,7 +758,7 @@ delete_hash:
 unlock_hash:
 	mutex_unlock(&fipsec->sa_hash_lock);
 	if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
-		ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
+		ida_free(&fipsec->halloc, sa_ctx->sa_handle);
 exists:
 	mutex_unlock(&fpga_xfrm->lock);
 	kfree(sa_ctx);
@@ -852,7 +852,7 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
 	if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action &
 	    MLX5_ACCEL_ESP_ACTION_DECRYPT)
-		ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
+		ida_free(&fipsec->halloc, sa_ctx->sa_handle);
 
 	mutex_lock(&fipsec->sa_hash_lock);
 	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,


@@ -590,7 +590,7 @@ static void del_sw_fte(struct fs_node *node)
 				     &fte->hash,
 				     rhash_fte);
 	WARN_ON(err);
-	ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
+	ida_free(&fg->fte_allocator, fte->index - fg->start_index);
 	kmem_cache_free(steering->ftes_cache, fte);
 }
 
@@ -640,7 +640,7 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
 	int index;
 	int ret;
 
-	index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
+	index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
 	if (index < 0)
 		return index;
 
@@ -656,7 +656,7 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
 	return 0;
 
 err_ida_remove:
-	ida_simple_remove(&fg->fte_allocator, index);
+	ida_free(&fg->fte_allocator, index);
 	return ret;
 }


@@ -88,12 +88,12 @@ void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count)
 int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index)
 {
 	int end = dev->roce.reserved_gids.start +
-		  dev->roce.reserved_gids.count;
+		  dev->roce.reserved_gids.count - 1;
 	int index = 0;
 
-	index = ida_simple_get(&dev->roce.reserved_gids.ida,
-			       dev->roce.reserved_gids.start, end,
-			       GFP_KERNEL);
+	index = ida_alloc_range(&dev->roce.reserved_gids.ida,
+				dev->roce.reserved_gids.start, end,
+				GFP_KERNEL);
 	if (index < 0)
 		return index;
@@ -105,7 +105,7 @@ int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index)
 void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index)
 {
 	mlx5_core_dbg(dev, "Freeing reserved GID %u\n", gid_index);
-	ida_simple_remove(&dev->roce.reserved_gids.ida, gid_index);
+	ida_free(&dev->roce.reserved_gids.ida, gid_index);
 }
 
 unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev)
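The three files above are the same mechanical conversion from the deprecated ida_simple_* helpers to the ida_alloc_*/ida_free() API. The one subtlety is the range convention, summarized below from linux/idr.h: ida_simple_get()'s end is exclusive, while the ida_alloc_*() max is inclusive.

/* ida_simple_get(ida, start, end, gfp)   end is EXCLUSIVE (0 means no limit)
 * ida_alloc_range(ida, min, max, gfp)    max is INCLUSIVE
 *
 * Hence the conversions above:
 *   ida_simple_get(ida, 1, 0, gfp)        -> ida_alloc_min(ida, 1, gfp)
 *   ida_simple_get(ida, 0, max_ftes, gfp) -> ida_alloc_max(ida, max_ftes - 1, gfp)
 *   ida_simple_get(ida, start, end, gfp)  -> ida_alloc_range(ida, start, end - 1, gfp)
 *      (which is why the GID code now computes end = start + count - 1)
 *   ida_simple_remove(ida, id)            -> ida_free(ida, id)
 */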


@@ -61,7 +61,7 @@ struct fw_page {
 	u32			function;
 	unsigned long		bitmask;
 	struct list_head	list;
-	unsigned		free_count;
+	unsigned int		free_count;
 };
 
 enum {


@@ -117,6 +117,9 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
 	bool empty_found = false;
 	int i;
 
+	lockdep_assert_held(&table->rl_lock);
+	WARN_ON(!table->rl_entry);
+
 	for (i = 0; i < table->max_size; i++) {
 		if (dedicated) {
 			if (!table->rl_entry[i].refcount)
@@ -172,38 +175,103 @@ bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
 }
 EXPORT_SYMBOL(mlx5_rl_are_equal);
 
+static int mlx5_rl_table_get(struct mlx5_rl_table *table)
+{
+	int i;
+
+	lockdep_assert_held(&table->rl_lock);
+	if (table->rl_entry) {
+		table->refcount++;
+		return 0;
+	}
+
+	table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
+				  GFP_KERNEL);
+	if (!table->rl_entry)
+		return -ENOMEM;
+
+	/* The index represents the index in HW rate limit table
+	 * Index 0 is reserved for unlimited rate
+	 */
+	for (i = 0; i < table->max_size; i++)
+		table->rl_entry[i].index = i + 1;
+
+	table->refcount++;
+	return 0;
+}
+
+static void mlx5_rl_table_put(struct mlx5_rl_table *table)
+{
+	lockdep_assert_held(&table->rl_lock);
+	if (--table->refcount)
+		return;
+
+	kfree(table->rl_entry);
+	table->rl_entry = NULL;
+}
+
+static void mlx5_rl_table_free(struct mlx5_core_dev *dev, struct mlx5_rl_table *table)
+{
+	int i;
+
+	if (!table->rl_entry)
+		return;
+
+	/* Clear all configured rates */
+	for (i = 0; i < table->max_size; i++)
+		if (table->rl_entry[i].refcount)
+			mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i], false);
+	kfree(table->rl_entry);
+}
+
+static void mlx5_rl_entry_get(struct mlx5_rl_entry *entry)
+{
+	entry->refcount++;
+}
+
+static void
+mlx5_rl_entry_put(struct mlx5_core_dev *dev, struct mlx5_rl_entry *entry)
+{
+	entry->refcount--;
+	if (!entry->refcount)
+		mlx5_set_pp_rate_limit_cmd(dev, entry, false);
+}
+
 int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
 			 bool dedicated_entry, u16 *index)
 {
 	struct mlx5_rl_table *table = &dev->priv.rl_table;
 	struct mlx5_rl_entry *entry;
-	int err = 0;
 	u32 rate;
+	int err;
+
+	if (!table->max_size)
+		return -EOPNOTSUPP;
 
 	rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
-	mutex_lock(&table->rl_lock);
 	if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
 		mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
 			      rate, table->min_rate, table->max_rate);
-		err = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
+	mutex_lock(&table->rl_lock);
+	err = mlx5_rl_table_get(table);
+	if (err)
+		goto out;
+
 	entry = find_rl_entry(table, rl_in, uid, dedicated_entry);
 	if (!entry) {
 		mlx5_core_err(dev, "Max number of %u rates reached\n",
 			      table->max_size);
 		err = -ENOSPC;
-		goto out;
+		goto rl_err;
 	}
-	if (entry->refcount) {
-		/* rate already configured */
-		entry->refcount++;
-	} else {
+	if (!entry->refcount) {
+		/* new rate limit */
 		memcpy(entry->rl_raw, rl_in, sizeof(entry->rl_raw));
 		entry->uid = uid;
-		/* new rate limit */
 		err = mlx5_set_pp_rate_limit_cmd(dev, entry, true);
 		if (err) {
 			mlx5_core_err(
@@ -214,14 +282,18 @@ int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
 					 burst_upper_bound),
 				MLX5_GET(set_pp_rate_limit_context, rl_in,
 					 typical_packet_size));
-			goto out;
+			goto rl_err;
 		}
 
-		entry->refcount = 1;
 		entry->dedicated = dedicated_entry;
 	}
+	mlx5_rl_entry_get(entry);
 	*index = entry->index;
 
+	mutex_unlock(&table->rl_lock);
+	return 0;
+
+rl_err:
+	mlx5_rl_table_put(table);
 out:
 	mutex_unlock(&table->rl_lock);
 	return err;
@@ -235,10 +307,8 @@ void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index)
 
 	mutex_lock(&table->rl_lock);
 	entry = &table->rl_entry[index - 1];
-	entry->refcount--;
-	if (!entry->refcount)
-		/* need to remove rate */
-		mlx5_set_pp_rate_limit_cmd(dev, entry, false);
+	mlx5_rl_entry_put(dev, entry);
+	mlx5_rl_table_put(table);
 	mutex_unlock(&table->rl_lock);
 }
 EXPORT_SYMBOL(mlx5_rl_remove_rate_raw);
@@ -286,12 +356,8 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
 			      rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
 		goto out;
 	}
-
-	entry->refcount--;
-	if (!entry->refcount)
-		/* need to remove rate */
-		mlx5_set_pp_rate_limit_cmd(dev, entry, false);
+	mlx5_rl_entry_put(dev, entry);
+	mlx5_rl_table_put(table);
 out:
 	mutex_unlock(&table->rl_lock);
 }
@@ -300,31 +366,19 @@ EXPORT_SYMBOL(mlx5_rl_remove_rate);
 int mlx5_init_rl_table(struct mlx5_core_dev *dev)
 {
 	struct mlx5_rl_table *table = &dev->priv.rl_table;
-	int i;
 
+	mutex_init(&table->rl_lock);
 	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) {
 		table->max_size = 0;
 		return 0;
 	}
 
-	mutex_init(&table->rl_lock);
-
 	/* First entry is reserved for unlimited rate */
 	table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1;
 	table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);
 	table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);
 
-	table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
-				  GFP_KERNEL);
-	if (!table->rl_entry)
-		return -ENOMEM;
-
-	/* The index represents the index in HW rate limit table
-	 * Index 0 is reserved for unlimited rate
-	 */
-	for (i = 0; i < table->max_size; i++)
-		table->rl_entry[i].index = i + 1;
-
+	/* Index 0 is reserved */
 	mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n",
 		       table->max_size,
 		       table->min_rate >> 10,
@@ -336,13 +390,10 @@ int mlx5_init_rl_table(struct mlx5_core_dev *dev)
 void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
 {
 	struct mlx5_rl_table *table = &dev->priv.rl_table;
-	int i;
 
-	/* Clear all configured rates */
-	for (i = 0; i < table->max_size; i++)
-		if (table->rl_entry[i].refcount)
-			mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i],
-						   false);
+	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing))
+		return;
 
-	kfree(dev->priv.rl_table.rl_entry);
+	mlx5_rl_table_free(dev, table);
+	mutex_destroy(&table->rl_lock);
 }
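The refactor above moves the table's lifetime out of init/cleanup: the entry array is now allocated on first use and freed when the last rate goes away, with a table-level refcount alongside the per-entry refcounts. A minimal userspace sketch of that get/put pattern (names and locking are stand-ins, not the driver's API):

#include <stdlib.h>
#include <stdio.h>

struct entry { unsigned long refcount; int index; };

struct table {
	unsigned long refcount;	/* users of the entry array */
	struct entry *entries;	/* NULL until first get */
	int max_size;
};

static int table_get(struct table *t)	/* caller holds the table lock */
{
	if (t->entries) {
		t->refcount++;
		return 0;
	}
	t->entries = calloc(t->max_size, sizeof(*t->entries));
	if (!t->entries)
		return -1;
	for (int i = 0; i < t->max_size; i++)
		t->entries[i].index = i + 1;	/* index 0 reserved, as above */
	t->refcount++;
	return 0;
}

static void table_put(struct table *t)	/* caller holds the table lock */
{
	if (--t->refcount)
		return;
	free(t->entries);
	t->entries = NULL;	/* the next get reallocates */
}

int main(void)
{
	struct table t = { .max_size = 4 };

	table_get(&t);	/* first user: allocates the array */
	table_get(&t);	/* second user: just bumps the refcount */
	table_put(&t);
	table_put(&t);	/* last user: frees the array */
	printf("entries=%p\n", (void *)t.entries);
	return 0;
}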


@@ -517,8 +517,8 @@ struct mlx5_rate_limit {
 
 struct mlx5_rl_entry {
 	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
-	u16 index;
 	u64 refcount;
+	u16 index;
 	u16 uid;
 	u8 dedicated : 1;
 };
@@ -530,6 +530,7 @@ struct mlx5_rl_table {
 	u32 max_rate;
 	u32 min_rate;
 	struct mlx5_rl_entry *rl_entry;
+	u64 refcount;
 };
 
 struct mlx5_core_roce {
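The field swap in struct mlx5_rl_entry is another packing fix: with u16 index placed before the 8-byte-aligned u64 refcount, the compiler inserts six bytes of padding; putting refcount first lets index, uid, and the bitfield share the tail. A hedged sketch (RAW_SZ stands in for MLX5_ST_SZ_BYTES(set_pp_rate_limit_context); the exact value does not change the padding behaviour):

#include <stdio.h>
#include <stdint.h>

#define RAW_SZ 32	/* stand-in for MLX5_ST_SZ_BYTES(set_pp_rate_limit_context) */

struct rl_entry_before {
	uint8_t  rl_raw[RAW_SZ];
	uint16_t index;		/* 6 bytes of padding follow, to align refcount */
	uint64_t refcount;
	uint16_t uid;
	uint8_t  dedicated : 1;
};

struct rl_entry_after {
	uint8_t  rl_raw[RAW_SZ];
	uint64_t refcount;
	uint16_t index;		/* small fields now pack together at the tail */
	uint16_t uid;
	uint8_t  dedicated : 1;
};

int main(void)
{
	/* typically 56 -> 48 bytes on x86-64 */
	printf("%zu -> %zu\n", sizeof(struct rl_entry_before),
	       sizeof(struct rl_entry_after));
	return 0;
}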