mlx5-updates-2021-11-16
Merge tag 'mlx5-updates-2021-11-16' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-11-16

Updates for mlx5 driver:

1) Support ethtool cq mode
2) Static allocation of mod header object for the common case
3) TC support for when local and remote VTEPs are in the same
4) Create E-Switch QoS objects on demand to save on resources
5) Minor code improvements
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 01dd74246c
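Note on item 1: the en.h and en_ethtool.c hunks below thread the kernel's extended coalescing parameters (struct kernel_ethtool_coalesce and the ETHTOOL_COALESCE_USE_CQE bit) through the mlx5e helpers, so completion moderation can be switched between EQE-based and CQE-based timing. As a hedged usage note (the interface name is a placeholder, and this assumes an ethtool build recent enough to know the option), the mode is driven from userspace with `ethtool -C eth0 cqe-mode-rx on cqe-mode-tx on` and read back with `ethtool -c eth0`.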
@@ -1148,9 +1148,12 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
 int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
                                struct ethtool_channels *ch);
 int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
-                               struct ethtool_coalesce *coal);
+                               struct ethtool_coalesce *coal,
+                               struct kernel_ethtool_coalesce *kernel_coal);
 int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
-                               struct ethtool_coalesce *coal);
+                               struct ethtool_coalesce *coal,
+                               struct kernel_ethtool_coalesce *kernel_coal,
+                               struct netlink_ext_ack *extack);
 int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
                                      struct ethtool_link_ksettings *link_ksettings);
 int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
@@ -155,3 +155,61 @@ struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh)
 	return mh->modify_hdr;
 }
+
+char *
+mlx5e_mod_hdr_alloc(struct mlx5_core_dev *mdev, int namespace,
+		    struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+	int new_num_actions, max_hw_actions;
+	size_t new_sz, old_sz;
+	void *ret;
+
+	if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
+		goto out;
+
+	max_hw_actions = mlx5e_mod_hdr_max_actions(mdev, namespace);
+	new_num_actions = min(max_hw_actions,
+			      mod_hdr_acts->actions ?
+			      mod_hdr_acts->max_actions * 2 : 1);
+	if (mod_hdr_acts->max_actions == new_num_actions)
+		return ERR_PTR(-ENOSPC);
+
+	new_sz = MLX5_MH_ACT_SZ * new_num_actions;
+	old_sz = mod_hdr_acts->max_actions * MLX5_MH_ACT_SZ;
+
+	if (mod_hdr_acts->is_static) {
+		ret = kzalloc(new_sz, GFP_KERNEL);
+		if (ret) {
+			memcpy(ret, mod_hdr_acts->actions, old_sz);
+			mod_hdr_acts->is_static = false;
+		}
+	} else {
+		ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
+		if (ret)
+			memset(ret + old_sz, 0, new_sz - old_sz);
+	}
+	if (!ret)
+		return ERR_PTR(-ENOMEM);
+
+	mod_hdr_acts->actions = ret;
+	mod_hdr_acts->max_actions = new_num_actions;
+
+out:
+	return mod_hdr_acts->actions + (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
+}
+
+void
+mlx5e_mod_hdr_dealloc(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+	if (!mod_hdr_acts->is_static)
+		kfree(mod_hdr_acts->actions);
+
+	mod_hdr_acts->actions = NULL;
+	mod_hdr_acts->num_actions = 0;
+	mod_hdr_acts->max_actions = 0;
+}
+
+char *
+mlx5e_mod_hdr_get_item(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, int pos)
+{
+	return mod_hdr_acts->actions + (pos * MLX5_MH_ACT_SZ);
+}
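The new mlx5e_mod_hdr_alloc() above hands out the next free action slot, doubling the backing buffer (capped at the device's max_modify_header_actions limit) and migrating a statically declared buffer to the heap the first time it has to grow. A self-contained userspace sketch of that grow-by-doubling idea follows; the names, ACT_SZ, and MAX_HW values are illustrative stand-ins, not the driver's:

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define ACT_SZ 16  /* stand-in for MLX5_MH_ACT_SZ */
	#define MAX_HW 64  /* stand-in for max_modify_header_actions */

	struct acts {
		int num;        /* slots already written; bumped by the caller */
		int max;        /* slots currently available */
		bool is_static; /* backing storage is caller-owned (e.g. on stack) */
		void *buf;
	};

	/* Return the next free slot, growing by doubling up to MAX_HW.
	 * A static buffer is copied to the heap on the first growth. */
	static void *act_alloc(struct acts *a)
	{
		int new_max;
		void *p;

		if (a->num < a->max)
			goto out;

		new_max = a->buf ? a->max * 2 : 1;
		if (new_max > MAX_HW)
			new_max = MAX_HW;
		if (new_max == a->max)
			return NULL; /* hardware limit hit: -ENOSPC in the driver */

		if (a->is_static) {
			p = calloc(new_max, ACT_SZ);
			if (p) {
				memcpy(p, a->buf, (size_t)a->max * ACT_SZ);
				a->is_static = false;
			}
		} else {
			p = realloc(a->buf, (size_t)new_max * ACT_SZ);
			if (p)
				memset((char *)p + (size_t)a->max * ACT_SZ, 0,
				       (size_t)(new_max - a->max) * ACT_SZ);
		}
		if (!p)
			return NULL; /* -ENOMEM in the driver */

		a->buf = p;
		a->max = new_max;
	out:
		return (char *)a->buf + (size_t)a->num * ACT_SZ;
	}

	int main(void)
	{
		char stack_buf[4][ACT_SZ] = {{0}};
		struct acts a = { .max = 4, .is_static = true, .buf = stack_buf };

		for (int i = 0; i < 10; i++) {
			void *slot = act_alloc(&a);
			if (!slot)
				return 1;
			memset(slot, i, ACT_SZ); /* "write" an action */
			a.num++;                 /* caller bumps the count */
		}
		printf("wrote %d actions, capacity %d, heap=%d\n",
		       a.num, a.max, !a.is_static);
		if (!a.is_static)
			free(a.buf);
		return 0;
	}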
@@ -7,14 +7,32 @@
 #include <linux/hashtable.h>
 #include <linux/mlx5/fs.h>
 
+#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
+
 struct mlx5e_mod_hdr_handle;
 
+struct mlx5e_tc_mod_hdr_acts {
+	int num_actions;
+	int max_actions;
+	bool is_static;
+	void *actions;
+};
+
+#define DECLARE_MOD_HDR_ACTS_ACTIONS(name, len) \
+	u8 name[len][MLX5_MH_ACT_SZ] = {}
+
+#define DECLARE_MOD_HDR_ACTS(name, acts_arr) \
+	struct mlx5e_tc_mod_hdr_acts name = { \
+		.max_actions = ARRAY_SIZE(acts_arr), \
+		.is_static = true, \
+		.actions = acts_arr, \
+	}
+
+char *mlx5e_mod_hdr_alloc(struct mlx5_core_dev *mdev, int namespace,
+			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+void mlx5e_mod_hdr_dealloc(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+char *mlx5e_mod_hdr_get_item(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, int pos);
+
 struct mlx5e_mod_hdr_handle *
 mlx5e_mod_hdr_attach(struct mlx5_core_dev *mdev,
 		     struct mod_hdr_tbl *tbl,
@@ -28,4 +46,12 @@ struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh);
 void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl);
 void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl);
 
+static inline int mlx5e_mod_hdr_max_actions(struct mlx5_core_dev *mdev, int namespace)
+{
+	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
+		return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
+	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
+		return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
+}
+
 #endif /* __MLX5E_EN_MOD_HDR_H__ */
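The DECLARE_MOD_HDR_ACTS pair above is what makes "static allocation for the common case" work: a caller declares a fixed on-stack actions array and a mlx5e_tc_mod_hdr_acts that points at it with is_static = true, so mlx5e_mod_hdr_alloc() only touches the heap if a rule overflows the declared slots. The conntrack code in the tc_ct.c hunks below uses it exactly this way:

	DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, MLX5_CT_MIN_MOD_ACTS);
	DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr);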
@@ -5,6 +5,7 @@
 #include <net/psample.h>
 #include "en/mapping.h"
 #include "en/tc/post_act.h"
+#include "en/mod_hdr.h"
 #include "sample.h"
 #include "eswitch.h"
 #include "en_tc.h"
@@ -255,12 +256,12 @@ sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id,
 		goto err_modify_hdr;
 	}
 
-	dealloc_mod_hdr_actions(&mod_acts);
+	mlx5e_mod_hdr_dealloc(&mod_acts);
 	return modify_hdr;
 
 err_modify_hdr:
 err_post_act:
-	dealloc_mod_hdr_actions(&mod_acts);
+	mlx5e_mod_hdr_dealloc(&mod_acts);
 err_set_regc0:
 	return ERR_PTR(err);
 }
@@ -36,6 +36,12 @@
 #define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen)
 #define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0)
 
+/* Statically allocate modify actions for
+ * ipv6 and port nat (5) + tuple fields (4) + nic mode zone restore (1) = 10.
+ * This will be increased dynamically if needed (for the ipv6 snat + dnat).
+ */
+#define MLX5_CT_MIN_MOD_ACTS 10
+
 #define ct_dbg(fmt, args...)\
 	netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
 
@@ -609,22 +615,15 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
 	struct flow_action *flow_action = &flow_rule->action;
 	struct mlx5_core_dev *mdev = ct_priv->dev;
 	struct flow_action_entry *act;
-	size_t action_size;
 	char *modact;
 	int err, i;
 
-	action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
-
 	flow_action_for_each(i, act, flow_action) {
 		switch (act->id) {
 		case FLOW_ACTION_MANGLE: {
-			err = alloc_mod_hdr_actions(mdev, ct_priv->ns_type,
-						    mod_acts);
-			if (err)
-				return err;
-
-			modact = mod_acts->actions +
-				 mod_acts->num_actions * action_size;
+			modact = mlx5e_mod_hdr_alloc(mdev, ct_priv->ns_type, mod_acts);
+			if (IS_ERR(modact))
+				return PTR_ERR(modact);
 
 			err = mlx5_tc_ct_parse_mangle_to_mod_act(act, modact);
 			if (err)
@@ -652,7 +651,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 				struct mlx5e_mod_hdr_handle **mh,
 				u8 zone_restore_id, bool nat)
 {
-	struct mlx5e_tc_mod_hdr_acts mod_acts = {};
+	DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, MLX5_CT_MIN_MOD_ACTS);
+	DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr);
 	struct flow_action_entry *meta;
 	u16 ct_state = 0;
 	int err;
@@ -706,11 +706,11 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 		attr->modify_hdr = mlx5e_mod_hdr_get(*mh);
 	}
 
-	dealloc_mod_hdr_actions(&mod_acts);
+	mlx5e_mod_hdr_dealloc(&mod_acts);
 	return 0;
 
 err_mapping:
-	dealloc_mod_hdr_actions(&mod_acts);
+	mlx5e_mod_hdr_dealloc(&mod_acts);
 	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
 	return err;
 }
@@ -907,12 +907,9 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
 	struct mlx5_ct_tuple rev_tuple = entry->tuple;
 	struct mlx5_ct_counter *shared_counter;
 	struct mlx5_ct_entry *rev_entry;
-	__be16 tmp_port;
 
 	/* get the reversed tuple */
-	tmp_port = rev_tuple.port.src;
-	rev_tuple.port.src = rev_tuple.port.dst;
-	rev_tuple.port.dst = tmp_port;
+	swap(rev_tuple.port.src, rev_tuple.port.dst);
 
 	if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
 		__be32 tmp_addr = rev_tuple.ip.src_v4;
@@ -1445,7 +1442,7 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
 	}
 	pre_ct->miss_rule = rule;
 
-	dealloc_mod_hdr_actions(&pre_mod_acts);
+	mlx5e_mod_hdr_dealloc(&pre_mod_acts);
 	kvfree(spec);
 	return 0;
 
@@ -1454,7 +1451,7 @@ err_miss_rule:
 err_flow_rule:
 	mlx5_modify_header_dealloc(dev, pre_ct->modify_hdr);
 err_mapping:
-	dealloc_mod_hdr_actions(&pre_mod_acts);
+	mlx5e_mod_hdr_dealloc(&pre_mod_acts);
 	kvfree(spec);
 	return err;
 }
@@ -1850,14 +1847,14 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
 	}
 
 	attr->ct_attr.ct_flow = ct_flow;
-	dealloc_mod_hdr_actions(&pre_mod_acts);
+	mlx5e_mod_hdr_dealloc(&pre_mod_acts);
 
 	return ct_flow->pre_ct_rule;
 
err_insert_orig:
 	mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
-	dealloc_mod_hdr_actions(&pre_mod_acts);
+	mlx5e_mod_hdr_dealloc(&pre_mod_acts);
 	mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
err_get_chain:
 	kfree(ct_flow->pre_ct_attr);
@@ -103,7 +103,7 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
 }
 
 static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
-				       struct net_device *mirred_dev,
+				       struct net_device *dev,
 				       struct mlx5e_tc_tun_route_attr *attr)
 {
 	struct net_device *route_dev;
@@ -122,13 +122,13 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
 		uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
 		attr->fl.fl4.flowi4_oif = uplink_dev->ifindex;
 	} else {
-		struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
+		struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(dev);
 
 		if (tunnel && tunnel->get_remote_ifindex)
-			attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(mirred_dev);
+			attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(dev);
 	}
 
-	rt = ip_route_output_key(dev_net(mirred_dev), &attr->fl.fl4);
+	rt = ip_route_output_key(dev_net(dev), &attr->fl.fl4);
 	if (IS_ERR(rt))
 		return PTR_ERR(rt);
 
@@ -440,10 +440,10 @@ release_neigh:
 
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
 static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
-				       struct net_device *mirred_dev,
+				       struct net_device *dev,
 				       struct mlx5e_tc_tun_route_attr *attr)
 {
-	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
+	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(dev);
 	struct net_device *route_dev;
 	struct net_device *out_dev;
 	struct dst_entry *dst;
@@ -451,8 +451,8 @@ static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
 	int ret;
 
 	if (tunnel && tunnel->get_remote_ifindex)
-		attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(mirred_dev);
-	dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, &attr->fl.fl6,
+		attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(dev);
+	dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(dev), NULL, &attr->fl.fl6,
 					      NULL);
 	if (IS_ERR(dst))
 		return PTR_ERR(dst);
@@ -708,7 +708,8 @@ release_neigh:
 
 int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
 			      struct mlx5_flow_spec *spec,
-			      struct mlx5_flow_attr *flow_attr)
+			      struct mlx5_flow_attr *flow_attr,
+			      struct net_device *filter_dev)
 {
 	struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr;
 	struct mlx5e_tc_int_port *int_port;
@@ -720,14 +721,14 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
 		/* Addresses are swapped for decap */
 		attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4;
 		attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4;
-		err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr);
+		err = mlx5e_route_lookup_ipv4_get(priv, filter_dev, &attr);
 	}
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
 	else if (flow_attr->tun_ip_version == 6) {
 		/* Addresses are swapped for decap */
 		attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6;
 		attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6;
-		err = mlx5e_route_lookup_ipv6_get(priv, priv->netdev, &attr);
+		err = mlx5e_route_lookup_ipv6_get(priv, filter_dev, &attr);
 	}
 #endif
 	else
@@ -94,7 +94,8 @@ mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
 #endif
 int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
 			      struct mlx5_flow_spec *spec,
-			      struct mlx5_flow_attr *attr);
+			      struct mlx5_flow_attr *attr,
+			      struct net_device *filter_dev);
 
 bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
 				    struct net_device *netdev);
@@ -1153,7 +1153,7 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv,
 
 	tbl_time_before = mlx5e_route_tbl_get_last_update(priv);
 	tbl_time_after = tbl_time_before;
-	err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr);
+	err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr, parse_attr->filter_dev);
 	if (err || !esw_attr->rx_tun_attr->decap_vport)
 		goto out;
 
@@ -1474,7 +1474,7 @@ static void mlx5e_reoffload_decap(struct mlx5e_priv *priv,
 
 	parse_attr = attr->parse_attr;
 	spec = &parse_attr->spec;
-	err = mlx5e_tc_tun_route_lookup(priv, spec, attr);
+	err = mlx5e_tc_tun_route_lookup(priv, spec, attr, parse_attr->filter_dev);
 	if (err) {
 		mlx5_core_warn(priv->mdev, "Failed to lookup route for flow, %d\n",
 			       err);
@@ -511,7 +511,8 @@ static int mlx5e_set_channels(struct net_device *dev,
 }
 
 int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
-			       struct ethtool_coalesce *coal)
+			       struct ethtool_coalesce *coal,
+			       struct kernel_ethtool_coalesce *kernel_coal)
 {
 	struct dim_cq_moder *rx_moder, *tx_moder;
 
@@ -528,6 +529,11 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
 	coal->tx_max_coalesced_frames = tx_moder->pkts;
 	coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled;
 
+	kernel_coal->use_cqe_mode_rx =
+		MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_BASED_MODER);
+	kernel_coal->use_cqe_mode_tx =
+		MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_CQE_BASED_MODER);
+
 	return 0;
 }
 
@@ -538,7 +544,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
-	return mlx5e_ethtool_get_coalesce(priv, coal);
+	return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
 }
 
 #define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD
@@ -578,14 +584,26 @@ mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coal
 	}
 }
 
+/* convert a boolean value of cq_mode to mlx5 period mode
+ * true : MLX5_CQ_PERIOD_MODE_START_FROM_CQE
+ * false : MLX5_CQ_PERIOD_MODE_START_FROM_EQE
+ */
+static int cqe_mode_to_period_mode(bool val)
+{
+	return val ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
 int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
-			       struct ethtool_coalesce *coal)
+			       struct ethtool_coalesce *coal,
+			       struct kernel_ethtool_coalesce *kernel_coal,
+			       struct netlink_ext_ack *extack)
 {
 	struct dim_cq_moder *rx_moder, *tx_moder;
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5e_params new_params;
 	bool reset_rx, reset_tx;
 	bool reset = true;
+	u8 cq_period_mode;
 	int err = 0;
 
 	if (!MLX5_CAP_GEN(mdev, cq_moderation))
@@ -605,6 +623,12 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
 		return -ERANGE;
 	}
 
+	if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
+	    !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) {
+		NL_SET_ERR_MSG_MOD(extack, "cqe_mode_rx/tx is not supported on this device");
+		return -EOPNOTSUPP;
+	}
+
 	mutex_lock(&priv->state_lock);
 	new_params = priv->channels.params;
 
@@ -621,6 +645,18 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
 	reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
 	reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
 
+	cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_rx);
+	if (cq_period_mode != rx_moder->cq_period_mode) {
+		mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode);
+		reset_rx = true;
+	}
+
+	cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_tx);
+	if (cq_period_mode != tx_moder->cq_period_mode) {
+		mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode);
+		reset_tx = true;
+	}
+
 	if (reset_rx) {
 		u8 mode = MLX5E_GET_PFLAG(&new_params,
 					  MLX5E_PFLAG_RX_CQE_BASED_MODER);
@@ -656,9 +692,9 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
 			      struct kernel_ethtool_coalesce *kernel_coal,
 			      struct netlink_ext_ack *extack)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
-	return mlx5e_ethtool_set_coalesce(priv, coal);
+	return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
 }
 
 static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
@@ -2358,7 +2394,8 @@ static void mlx5e_get_rmon_stats(struct net_device *netdev,
 const struct ethtool_ops mlx5e_ethtool_ops = {
 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
 				     ETHTOOL_COALESCE_MAX_FRAMES |
-				     ETHTOOL_COALESCE_USE_ADAPTIVE,
+				     ETHTOOL_COALESCE_USE_ADAPTIVE |
+				     ETHTOOL_COALESCE_USE_CQE,
 	.get_drvinfo = mlx5e_get_drvinfo,
 	.get_link = ethtool_op_get_link,
 	.get_link_ext_state = mlx5e_get_link_ext_state,
@@ -258,7 +258,7 @@ static int mlx5e_rep_get_coalesce(struct net_device *netdev,
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
-	return mlx5e_ethtool_get_coalesce(priv, coal);
+	return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
 }
 
 static int mlx5e_rep_set_coalesce(struct net_device *netdev,
@@ -268,7 +268,7 @@ static int mlx5e_rep_set_coalesce(struct net_device *netdev,
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
-	return mlx5e_ethtool_set_coalesce(priv, coal);
+	return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
 }
 
 static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
@@ -2076,7 +2076,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
 
 	for (i = 0; i < NUM_PTP_CH_STATS; i++)
 		sprintf(data + (idx++) * ETH_GSTRING_LEN,
-			ptp_ch_stats_desc[i].format);
+			"%s", ptp_ch_stats_desc[i].format);
 
 	if (priv->tx_ptp_opened) {
 		for (tc = 0; tc < priv->max_opened_tc; tc++)
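The one-argument-to-two change above is a real fix rather than churn: passing a non-literal string as the format argument of sprintf() means any '%' in a stats name would be parsed as a conversion specifier (the hazard -Wformat-security warns about). The same pattern is fixed in the pci_irq.c hunk at the end of this series. A minimal standalone illustration, with an invented name containing '%':

	#include <stdio.h>

	int main(void)
	{
		char buf[64];
		const char *name = "rx_50%_of_budget"; /* '%' smuggled in via data */

		/* sprintf(buf, name); -- would parse "%_o" as a conversion: UB */
		snprintf(buf, sizeof(buf), "%s", name); /* data is never a format */
		puts(buf);
		return 0;
	}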
@@ -71,7 +71,6 @@
 #include "lag/mp.h"
 
 #define nic_chains(priv) ((priv)->fs.tc.chains)
-#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
 
 #define MLX5E_TC_TABLE_NUM_GROUPS 4
 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
@@ -209,12 +208,9 @@ mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
 	char *modact;
 	int err;
 
-	err = alloc_mod_hdr_actions(mdev, ns, mod_hdr_acts);
-	if (err)
-		return err;
-
-	modact = mod_hdr_acts->actions +
-		 (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
+	modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
+	if (IS_ERR(modact))
+		return PTR_ERR(modact);
 
 	/* Firmware has 5bit length field and 0 means 32bits */
 	if (mlen == 32)
@@ -333,7 +329,7 @@ void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
 	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
 	char *modact;
 
-	modact = mod_hdr_acts->actions + (act_id * MLX5_MH_ACT_SZ);
+	modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);
 
 	/* Firmware has 5bit length field and 0 means 32bits */
 	if (mlen == 32)
@@ -1076,7 +1072,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
 		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
-		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+		mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
 		if (err)
 			return err;
 	}
@@ -1132,16 +1128,16 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
 	}
 	mutex_unlock(&priv->fs.tc.t_lock);
 
-	kvfree(attr->parse_attr);
-
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		mlx5e_detach_mod_hdr(priv, flow);
 
-	mlx5_fc_destroy(priv->mdev, attr->counter);
+	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+		mlx5_fc_destroy(priv->mdev, attr->counter);
 
 	if (flow_flag_test(flow, HAIRPIN))
 		mlx5e_hairpin_flow_del(priv, flow);
 
+	kvfree(attr->parse_attr);
 	kfree(flow->attr);
 }
 
@@ -1623,15 +1619,12 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 		mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
-		dealloc_mod_hdr_actions(&attr->parse_attr->mod_hdr_acts);
+		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
 		if (vf_tun && attr->modify_hdr)
 			mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
 		else
 			mlx5e_detach_mod_hdr(priv, flow);
 	}
-	kfree(attr->sample_attr);
-	kvfree(attr->parse_attr);
-	kvfree(attr->esw_attr->rx_tun_attr);
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
 		mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);
@@ -1645,6 +1638,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 	if (flow_flag_test(flow, L3_TO_L2_DECAP))
 		mlx5e_detach_decap(priv, flow);
 
+	kfree(attr->sample_attr);
+	kvfree(attr->esw_attr->rx_tun_attr);
+	kvfree(attr->parse_attr);
 	kfree(flow->attr);
 }
 
@@ -2766,13 +2762,12 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 				struct netlink_ext_ack *extack)
 {
 	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
-	int i, action_size, first, last, next_z;
 	void *headers_c, *headers_v, *action, *vals_p;
 	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
 	struct mlx5e_tc_mod_hdr_acts *mod_acts;
-	struct mlx5_fields *f;
 	unsigned long mask, field_mask;
 	int err;
+	int i, first, last, next_z;
+	struct mlx5_fields *f;
 	u8 cmd;
 
 	mod_acts = &parse_attr->mod_hdr_acts;
@@ -2784,8 +2779,6 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 	set_vals = &hdrs[0].vals;
 	add_vals = &hdrs[1].vals;
 
-	action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
-
 	for (i = 0; i < ARRAY_SIZE(fields); i++) {
 		bool skip;
 
@@ -2853,18 +2846,16 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 			return -EOPNOTSUPP;
 		}
 
-		err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
-		if (err) {
+		action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts);
+		if (IS_ERR(action)) {
 			NL_SET_ERR_MSG_MOD(extack,
 					   "too many pedit actions, can't offload");
 			mlx5_core_warn(priv->mdev,
 				       "mlx5: parsed %d pedit actions, can't do more\n",
 				       mod_acts->num_actions);
-			return err;
+			return PTR_ERR(action);
 		}
 
-		action = mod_acts->actions +
-			 (mod_acts->num_actions * action_size);
 		MLX5_SET(set_action_in, action, action_type, cmd);
 		MLX5_SET(set_action_in, action, field, f->field);
 
@@ -2894,57 +2885,6 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
 	return 0;
 }
 
-static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
-						  int namespace)
-{
-	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
-		return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
-	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
-		return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
-}
-
-int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
-			  int namespace,
-			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
-{
-	int action_size, new_num_actions, max_hw_actions;
-	size_t new_sz, old_sz;
-	void *ret;
-
-	if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
-		return 0;
-
-	action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
-
-	max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
-								namespace);
-	new_num_actions = min(max_hw_actions,
-			      mod_hdr_acts->actions ?
-			      mod_hdr_acts->max_actions * 2 : 1);
-	if (mod_hdr_acts->max_actions == new_num_actions)
-		return -ENOSPC;
-
-	new_sz = action_size * new_num_actions;
-	old_sz = mod_hdr_acts->max_actions * action_size;
-	ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
-	if (!ret)
-		return -ENOMEM;
-
-	memset(ret + old_sz, 0, new_sz - old_sz);
-	mod_hdr_acts->actions = ret;
-	mod_hdr_acts->max_actions = new_num_actions;
-
-	return 0;
-}
-
-void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
-{
-	kfree(mod_hdr_acts->actions);
-	mod_hdr_acts->actions = NULL;
-	mod_hdr_acts->num_actions = 0;
-	mod_hdr_acts->max_actions = 0;
-}
-
 static const struct pedit_headers zero_masks = {};
 
 static int
@@ -2967,7 +2907,7 @@ parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
 		goto out_err;
 	}
 
-	if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
+	if (!mlx5e_mod_hdr_max_actions(priv->mdev, namespace)) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "The pedit offload action is not supported");
 		goto out_err;
@@ -3060,7 +3000,7 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
 	return 0;
 
 out_dealloc_parsed_actions:
-	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
 	return err;
 }
 
@@ -3484,12 +3424,12 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
 	if (err)
 		return err;
 
+	/* In case all pedit actions are skipped, remove the MOD_HDR flag. */
 	if (parse_attr->mod_hdr_acts.num_actions > 0)
 		return 0;
 
-	/* In case all pedit actions are skipped, remove the MOD_HDR flag. */
 	attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
 
 	if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
 		return 0;
@@ -4708,7 +4648,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
 
 err_free:
 	flow_flag_set(flow, FAILED);
-	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
 	mlx5e_flow_put(priv, flow);
 out:
 	return err;
@@ -5008,14 +4948,8 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
 int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
 				struct tc_cls_matchall_offload *ma)
 {
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct netlink_ext_ack *extack = ma->common.extack;
 
-	if (!mlx5_esw_qos_enabled(esw)) {
-		NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
-		return -EOPNOTSUPP;
-	}
-
 	if (ma->common.prio != 1) {
 		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
 		return -EINVAL;
@@ -247,11 +247,6 @@ int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
 			      struct mlx5e_tc_flow_parse_attr *parse_attr,
 			      struct mlx5e_tc_flow *flow);
 
-int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
-			  int namespace,
-			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
-void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
-
 struct mlx5e_tc_flow;
 u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow);
 
|
@ -14,6 +14,7 @@
|
||||
#include "fs_core.h"
|
||||
#include "esw/indir_table.h"
|
||||
#include "lib/fs_chains.h"
|
||||
#include "en/mod_hdr.h"
|
||||
|
||||
#define MLX5_ESW_INDIR_TABLE_SIZE 128
|
||||
#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
|
||||
@ -226,7 +227,7 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
|
||||
goto err_handle;
|
||||
}
|
||||
|
||||
dealloc_mod_hdr_actions(&mod_acts);
|
||||
mlx5e_mod_hdr_dealloc(&mod_acts);
|
||||
rule->handle = handle;
|
||||
rule->vni = esw_attr->rx_tun_attr->vni;
|
||||
rule->mh = flow_act.modify_hdr;
|
||||
@ -243,7 +244,7 @@ err_table:
|
||||
mlx5_modify_header_dealloc(esw->dev, flow_act.modify_hdr);
|
||||
err_mod_hdr_alloc:
|
||||
err_mod_hdr_regc1:
|
||||
dealloc_mod_hdr_actions(&mod_acts);
|
||||
mlx5e_mod_hdr_dealloc(&mod_acts);
|
||||
err_mod_hdr_regc0:
|
||||
err_ethertype:
|
||||
kfree(rule);
|
||||
|
@ -522,9 +522,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
|
||||
return PTR_ERR(evport);
|
||||
|
||||
mutex_lock(&esw->state_lock);
|
||||
err = mlx5_esw_qos_set_vport_min_rate(esw, evport, min_rate, NULL);
|
||||
if (!err)
|
||||
err = mlx5_esw_qos_set_vport_max_rate(esw, evport, max_rate, NULL);
|
||||
err = mlx5_esw_qos_set_vport_rate(esw, evport, max_rate, min_rate);
|
||||
mutex_unlock(&esw->state_lock);
|
||||
return err;
|
||||
}
|
||||
|
@@ -204,10 +204,8 @@ static int esw_qos_normalize_groups_min_rate(struct mlx5_eswitch *esw, u32 divider,
 	return 0;
 }
 
-int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw,
-				    struct mlx5_vport *evport,
-				    u32 min_rate,
-				    struct netlink_ext_ack *extack)
+static int esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
+				      u32 min_rate, struct netlink_ext_ack *extack)
 {
 	u32 fw_max_bw_share, previous_min_rate;
 	bool min_rate_supported;
@@ -231,10 +229,8 @@ int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw,
 	return err;
 }
 
-int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw,
-				    struct mlx5_vport *evport,
-				    u32 max_rate,
-				    struct netlink_ext_ack *extack)
+static int esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
+				      u32 max_rate, struct netlink_ext_ack *extack)
 {
 	u32 act_max_rate = max_rate;
 	bool max_rate_supported;
@@ -432,16 +428,13 @@ static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
 }
 
 static struct mlx5_esw_rate_group *
-esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+__esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
 {
 	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
 	struct mlx5_esw_rate_group *group;
 	u32 divider;
 	int err;
 
-	if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth))
-		return ERR_PTR(-EOPNOTSUPP);
-
 	group = kzalloc(sizeof(*group), GFP_KERNEL);
 	if (!group)
 		return ERR_PTR(-ENOMEM);
@@ -482,9 +475,32 @@ err_sched_elem:
 	return ERR_PTR(err);
 }
 
-static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
-				      struct mlx5_esw_rate_group *group,
-				      struct netlink_ext_ack *extack)
+static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack);
+static void esw_qos_put(struct mlx5_eswitch *esw);
+
+static struct mlx5_esw_rate_group *
+esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+{
+	struct mlx5_esw_rate_group *group;
+	int err;
+
+	if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	err = esw_qos_get(esw, extack);
+	if (err)
+		return ERR_PTR(err);
+
+	group = __esw_qos_create_rate_group(esw, extack);
+	if (IS_ERR(group))
+		esw_qos_put(esw);
+
+	return group;
+}
+
+static int __esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
+					struct mlx5_esw_rate_group *group,
+					struct netlink_ext_ack *extack)
 {
 	u32 divider;
 	int err;
@@ -503,7 +519,21 @@ static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
 		NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed");
 
 	trace_mlx5_esw_group_qos_destroy(esw->dev, group, group->tsar_ix);
+
 	kfree(group);
+
+	return err;
+}
+
+static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
+				      struct mlx5_esw_rate_group *group,
+				      struct netlink_ext_ack *extack)
+{
+	int err;
+
+	err = __esw_qos_destroy_rate_group(esw, group, extack);
+	esw_qos_put(esw);
+
 	return err;
 }
 
@@ -526,7 +556,7 @@ static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
 	return false;
 }
 
-void mlx5_esw_qos_create(struct mlx5_eswitch *esw)
+static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
 {
 	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
 	struct mlx5_core_dev *dev = esw->dev;
@@ -534,14 +564,10 @@ void mlx5_esw_qos_create(struct mlx5_eswitch *esw)
 	int err;
 
 	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
-		return;
+		return -EOPNOTSUPP;
 
 	if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
-		return;
-
-	mutex_lock(&esw->state_lock);
-	if (esw->qos.enabled)
-		goto unlock;
+		return -EOPNOTSUPP;
 
 	MLX5_SET(scheduling_context, tsar_ctx, element_type,
 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
@@ -555,75 +581,93 @@ void mlx5_esw_qos_create(struct mlx5_eswitch *esw)
 						 &esw->qos.root_tsar_ix);
 	if (err) {
 		esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err);
-		goto unlock;
+		return err;
 	}
 
 	INIT_LIST_HEAD(&esw->qos.groups);
 	if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) {
-		esw->qos.group0 = esw_qos_create_rate_group(esw, NULL);
+		esw->qos.group0 = __esw_qos_create_rate_group(esw, extack);
 		if (IS_ERR(esw->qos.group0)) {
 			esw_warn(dev, "E-Switch create rate group 0 failed (%ld)\n",
 				 PTR_ERR(esw->qos.group0));
 			goto err_group0;
 		}
 	}
-	esw->qos.enabled = true;
-unlock:
-	mutex_unlock(&esw->state_lock);
-	return;
+	refcount_set(&esw->qos.refcnt, 1);
+
+	return 0;
 
 err_group0:
-	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
-						  SCHEDULING_HIERARCHY_E_SWITCH,
-						  esw->qos.root_tsar_ix);
-	if (err)
-		esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err);
-	mutex_unlock(&esw->state_lock);
+	if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH,
+						esw->qos.root_tsar_ix))
+		esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n");
+
+	return err;
 }
 
-void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw)
+static void esw_qos_destroy(struct mlx5_eswitch *esw)
 {
-	struct devlink *devlink = priv_to_devlink(esw->dev);
 	int err;
 
-	devlink_rate_nodes_destroy(devlink);
-	mutex_lock(&esw->state_lock);
-	if (!esw->qos.enabled)
-		goto unlock;
-
 	if (esw->qos.group0)
-		esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL);
+		__esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL);
 
 	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
 						  SCHEDULING_HIERARCHY_E_SWITCH,
 						  esw->qos.root_tsar_ix);
 	if (err)
		esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err);
-
-	esw->qos.enabled = false;
-unlock:
-	mutex_unlock(&esw->state_lock);
 }
 
-int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
-			      u32 max_rate, u32 bw_share)
+static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+{
+	int err = 0;
+
+	lockdep_assert_held(&esw->state_lock);
+
+	if (!refcount_inc_not_zero(&esw->qos.refcnt)) {
+		/* esw_qos_create() set refcount to 1 only on success.
+		 * No need to decrement on failure.
+		 */
+		err = esw_qos_create(esw, extack);
+	}
+
+	return err;
+}
+
+static void esw_qos_put(struct mlx5_eswitch *esw)
+{
+	lockdep_assert_held(&esw->state_lock);
+	if (refcount_dec_and_test(&esw->qos.refcnt))
+		esw_qos_destroy(esw);
+}
+
+static int esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+				u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack)
 {
 	int err;
 
 	lockdep_assert_held(&esw->state_lock);
-	if (!esw->qos.enabled)
+	if (vport->qos.enabled)
 		return 0;
 
-	if (vport->qos.enabled)
-		return -EEXIST;
+	err = esw_qos_get(esw, extack);
+	if (err)
+		return err;
 
 	vport->qos.group = esw->qos.group0;
 
 	err = esw_qos_vport_create_sched_element(esw, vport, max_rate, bw_share);
-	if (!err) {
-		vport->qos.enabled = true;
-		trace_mlx5_esw_vport_qos_create(vport, bw_share, max_rate);
-	}
+	if (err)
+		goto err_out;
+
+	vport->qos.enabled = true;
+	trace_mlx5_esw_vport_qos_create(vport, bw_share, max_rate);
+
+	return 0;
+
+err_out:
+	esw_qos_put(esw);
 
 	return err;
 }
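esw_qos_get()/esw_qos_put() above implement a lazily created, refcounted singleton: the first user creates the E-Switch QoS objects (the refcount is set to 1 only on successful create, so a failed create needs no decrement), later users just take a reference via refcount_inc_not_zero(), and the last put tears everything down. A self-contained sketch of the pattern, with a plain counter and mutex standing in for refcount_t and esw->state_lock (all names here are illustrative):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
	static int qos_refcnt; /* 0 means "not created" */

	static int qos_create(void)
	{
		/* ... allocate scheduler objects here ... */
		printf("QoS objects created\n");
		qos_refcnt = 1; /* set to 1 only on success */
		return 0;
	}

	static void qos_destroy(void)
	{
		/* ... free scheduler objects here ... */
		printf("QoS objects destroyed\n");
	}

	/* Callers must hold state_lock, mirroring lockdep_assert_held(). */
	static int qos_get(void)
	{
		if (qos_refcnt == 0)
			return qos_create(); /* no decrement needed on failure */
		qos_refcnt++;
		return 0;
	}

	static void qos_put(void)
	{
		if (--qos_refcnt == 0)
			qos_destroy();
	}

	int main(void)
	{
		pthread_mutex_lock(&state_lock);
		qos_get(); /* first user: creates */
		qos_get(); /* second user: just takes a reference */
		qos_put();
		qos_put(); /* last user: destroys */
		pthread_mutex_unlock(&state_lock);
		return 0;
	}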
@@ -633,7 +677,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 	int err;
 
 	lockdep_assert_held(&esw->state_lock);
-	if (!esw->qos.enabled || !vport->qos.enabled)
+	if (!vport->qos.enabled)
 		return;
 	WARN(vport->qos.group && vport->qos.group != esw->qos.group0,
 	     "Disabling QoS on port before detaching it from group");
@@ -645,8 +689,27 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 		esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
 			 vport->vport, err);
 
-	vport->qos.enabled = false;
+	memset(&vport->qos, 0, sizeof(vport->qos));
 	trace_mlx5_esw_vport_qos_destroy(vport);
+
+	esw_qos_put(esw);
+}
+
+int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+				u32 min_rate, u32 max_rate)
+{
+	int err;
+
+	lockdep_assert_held(&esw->state_lock);
+	err = esw_qos_vport_enable(esw, vport, 0, 0, NULL);
+	if (err)
+		return err;
+
+	err = esw_qos_set_vport_min_rate(esw, vport, min_rate, NULL);
+	if (!err)
+		err = esw_qos_set_vport_max_rate(esw, vport, max_rate, NULL);
+
+	return err;
 }
 
 int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
@@ -654,22 +717,29 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
 	u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
 	struct mlx5_vport *vport;
 	u32 bitmask;
+	int err;
 
 	vport = mlx5_eswitch_get_vport(esw, vport_num);
 	if (IS_ERR(vport))
 		return PTR_ERR(vport);
 
-	if (!vport->qos.enabled)
-		return -EOPNOTSUPP;
-
-	MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
+	mutex_lock(&esw->state_lock);
+	if (!vport->qos.enabled) {
+		/* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. */
+		err = esw_qos_vport_enable(esw, vport, rate_mbps, vport->qos.bw_share, NULL);
+	} else {
+		MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
 
-	bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+		bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+		err = mlx5_modify_scheduling_element_cmd(esw->dev,
+							 SCHEDULING_HIERARCHY_E_SWITCH,
+							 ctx,
+							 vport->qos.esw_tsar_ix,
+							 bitmask);
+	}
+	mutex_unlock(&esw->state_lock);
 
-	return mlx5_modify_scheduling_element_cmd(esw->dev,
-						  SCHEDULING_HIERARCHY_E_SWITCH,
-						  ctx,
-						  vport->qos.esw_tsar_ix,
-						  bitmask);
+	return err;
 }
 
 #define MLX5_LINKSPEED_UNIT 125000 /* 1Mbps in Bps */
@@ -728,7 +798,12 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
 		return err;
 
 	mutex_lock(&esw->state_lock);
-	err = mlx5_esw_qos_set_vport_min_rate(esw, vport, tx_share, extack);
+	err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
+	if (err)
+		goto unlock;
+
+	err = esw_qos_set_vport_min_rate(esw, vport, tx_share, extack);
+unlock:
 	mutex_unlock(&esw->state_lock);
 	return err;
 }
@@ -749,7 +824,12 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv,
 		return err;
 
 	mutex_lock(&esw->state_lock);
-	err = mlx5_esw_qos_set_vport_max_rate(esw, vport, tx_max, extack);
+	err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
+	if (err)
+		goto unlock;
+
+	err = esw_qos_set_vport_max_rate(esw, vport, tx_max, extack);
+unlock:
 	mutex_unlock(&esw->state_lock);
 	return err;
 }
@@ -846,7 +926,9 @@ int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
 	int err;
 
 	mutex_lock(&esw->state_lock);
-	err = esw_qos_vport_update_group(esw, vport, group, extack);
+	err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
+	if (!err)
+		err = esw_qos_vport_update_group(esw, vport, group, extack);
 	mutex_unlock(&esw->state_lock);
 	return err;
 }
@@ -6,18 +6,8 @@
 
 #ifdef CONFIG_MLX5_ESWITCH
 
-int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw,
-				    struct mlx5_vport *evport,
-				    u32 min_rate,
-				    struct netlink_ext_ack *extack);
-int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw,
-				    struct mlx5_vport *evport,
-				    u32 max_rate,
-				    struct netlink_ext_ack *extack);
-void mlx5_esw_qos_create(struct mlx5_eswitch *esw);
-void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw);
-int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
-			      u32 max_rate, u32 bw_share);
+int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
+				u32 max_rate, u32 min_rate);
 void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
 
 int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
|
@ -781,9 +781,6 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Attach vport to the eswitch rate limiter */
|
||||
mlx5_esw_qos_vport_enable(esw, vport, vport->qos.max_rate, vport->qos.bw_share);
|
||||
|
||||
if (mlx5_esw_is_manager_vport(esw, vport_num))
|
||||
return 0;
|
||||
|
||||
@ -1260,8 +1257,6 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
|
||||
|
||||
mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
|
||||
|
||||
mlx5_esw_qos_create(esw);
|
||||
|
||||
esw->mode = mode;
|
||||
|
||||
if (mode == MLX5_ESWITCH_LEGACY) {
|
||||
@ -1290,7 +1285,6 @@ abort:
|
||||
if (mode == MLX5_ESWITCH_OFFLOADS)
|
||||
mlx5_rescan_drivers(esw->dev);
|
||||
|
||||
mlx5_esw_qos_destroy(esw);
|
||||
mlx5_esw_acls_ns_cleanup(esw);
|
||||
return err;
|
||||
}
|
||||
@ -1330,6 +1324,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
|
||||
|
||||
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
|
||||
{
|
||||
struct devlink *devlink = priv_to_devlink(esw->dev);
|
||||
int old_mode;
|
||||
|
||||
lockdep_assert_held_write(&esw->mode_lock);
|
||||
@ -1359,7 +1354,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
|
||||
if (old_mode == MLX5_ESWITCH_OFFLOADS)
|
||||
mlx5_rescan_drivers(esw->dev);
|
||||
|
||||
mlx5_esw_qos_destroy(esw);
|
||||
devlink_rate_nodes_destroy(devlink);
|
||||
|
||||
mlx5_esw_acls_ns_cleanup(esw);
|
||||
|
||||
if (clear_vf)
|
||||
@ -1568,6 +1564,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
|
||||
lockdep_register_key(&esw->mode_lock_key);
|
||||
init_rwsem(&esw->mode_lock);
|
||||
lockdep_set_class(&esw->mode_lock, &esw->mode_lock_key);
|
||||
refcount_set(&esw->qos.refcnt, 0);
|
||||
|
||||
esw->enabled_vports = 0;
|
||||
esw->mode = MLX5_ESWITCH_NONE;
|
||||
@ -1601,6 +1598,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
|
||||
|
||||
esw->dev->priv.eswitch = NULL;
|
||||
destroy_workqueue(esw->work_queue);
|
||||
WARN_ON(refcount_read(&esw->qos.refcnt));
|
||||
lockdep_unregister_key(&esw->mode_lock_key);
|
||||
mutex_destroy(&esw->state_lock);
|
||||
WARN_ON(!xa_empty(&esw->offloads.vhca_map));
|
||||
@ -1690,82 +1688,6 @@ bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
|
||||
return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
|
||||
}
|
||||
|
||||
static bool
|
||||
is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
|
||||
{
|
||||
return vport_num == MLX5_VPORT_PF ||
|
||||
mlx5_eswitch_is_vf_vport(esw, vport_num) ||
|
||||
mlx5_esw_is_sf_vport(esw, vport_num);
|
||||
}
|
||||
|
||||
int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
|
||||
u8 *hw_addr, int *hw_addr_len,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct mlx5_eswitch *esw;
|
||||
struct mlx5_vport *vport;
|
||||
int err = -EOPNOTSUPP;
|
||||
u16 vport_num;
|
||||
|
||||
esw = mlx5_devlink_eswitch_get(port->devlink);
|
||||
if (IS_ERR(esw))
|
||||
return PTR_ERR(esw);
|
||||
|
||||
vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
|
||||
if (!is_port_function_supported(esw, vport_num))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
vport = mlx5_eswitch_get_vport(esw, vport_num);
|
||||
if (IS_ERR(vport)) {
|
||||
NL_SET_ERR_MSG_MOD(extack, "Invalid port");
|
||||
return PTR_ERR(vport);
|
||||
}
|
||||
|
||||
mutex_lock(&esw->state_lock);
|
||||
if (vport->enabled) {
|
||||
ether_addr_copy(hw_addr, vport->info.mac);
|
||||
*hw_addr_len = ETH_ALEN;
|
||||
err = 0;
|
||||
}
|
||||
mutex_unlock(&esw->state_lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
|
||||
const u8 *hw_addr, int hw_addr_len,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct mlx5_eswitch *esw;
|
||||
struct mlx5_vport *vport;
|
||||
int err = -EOPNOTSUPP;
|
||||
u16 vport_num;
|
||||
|
||||
esw = mlx5_devlink_eswitch_get(port->devlink);
|
||||
if (IS_ERR(esw)) {
|
||||
NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
|
||||
return PTR_ERR(esw);
|
||||
}
|
||||
|
||||
vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
|
||||
if (!is_port_function_supported(esw, vport_num)) {
|
||||
NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
|
||||
return -EINVAL;
|
||||
}
|
||||
vport = mlx5_eswitch_get_vport(esw, vport_num);
|
||||
if (IS_ERR(vport)) {
|
||||
NL_SET_ERR_MSG_MOD(extack, "Invalid port");
|
||||
return PTR_ERR(vport);
|
||||
}
|
||||
|
||||
mutex_lock(&esw->state_lock);
|
||||
if (vport->enabled)
|
||||
err = mlx5_esw_set_vport_mac_locked(esw, vport, hw_addr);
|
||||
else
|
||||
NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
|
||||
mutex_unlock(&esw->state_lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
|
||||
u16 vport, int link_state)
|
||||
{
|
||||
@ -1822,8 +1744,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
|
||||
ivi->qos = evport->info.qos;
|
||||
ivi->spoofchk = evport->info.spoofchk;
|
||||
ivi->trusted = evport->info.trusted;
|
||||
ivi->min_tx_rate = evport->qos.min_rate;
|
||||
ivi->max_tx_rate = evport->qos.max_rate;
|
||||
if (evport->qos.enabled) {
|
||||
ivi->min_tx_rate = evport->qos.min_rate;
|
||||
ivi->max_tx_rate = evport->qos.max_rate;
|
||||
}
|
||||
mutex_unlock(&esw->state_lock);
|
||||
|
||||
return 0;
|
||||
|
@@ -308,10 +308,14 @@ struct mlx5_eswitch {
 	atomic64_t user_count;
 
 	struct {
-		bool enabled;
 		u32 root_tsar_ix;
 		struct mlx5_esw_rate_group *group0;
 		struct list_head groups; /* Protected by esw->state_lock */
+
+		/* Protected by esw->state_lock.
+		 * Initially 0, meaning no QoS users and QoS is disabled.
+		 */
+		refcount_t refcnt;
 	} qos;
 
 	struct mlx5_esw_bridge_offloads *br_offloads;
@@ -516,11 +520,6 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
 				  u16 vport, u16 vlan, u8 qos, u8 set_flags);
 
-static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
-{
-	return esw->qos.enabled;
-}
-
 static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
 						       u8 vlan_depth)
 {
@@ -3862,3 +3862,62 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
 	return vport->metadata;
 }
 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
+
+static bool
+is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
+{
+	return vport_num == MLX5_VPORT_PF ||
+	       mlx5_eswitch_is_vf_vport(esw, vport_num) ||
+	       mlx5_esw_is_sf_vport(esw, vport_num);
+}
+
+int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
+					   u8 *hw_addr, int *hw_addr_len,
+					   struct netlink_ext_ack *extack)
+{
+	struct mlx5_eswitch *esw;
+	struct mlx5_vport *vport;
+	u16 vport_num;
+
+	esw = mlx5_devlink_eswitch_get(port->devlink);
+	if (IS_ERR(esw))
+		return PTR_ERR(esw);
+
+	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+	if (!is_port_function_supported(esw, vport_num))
+		return -EOPNOTSUPP;
+
+	vport = mlx5_eswitch_get_vport(esw, vport_num);
+	if (IS_ERR(vport)) {
+		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
+		return PTR_ERR(vport);
+	}
+
+	mutex_lock(&esw->state_lock);
+	ether_addr_copy(hw_addr, vport->info.mac);
+	*hw_addr_len = ETH_ALEN;
+	mutex_unlock(&esw->state_lock);
+	return 0;
+}
+
+int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
+					   const u8 *hw_addr, int hw_addr_len,
+					   struct netlink_ext_ack *extack)
+{
+	struct mlx5_eswitch *esw;
+	u16 vport_num;
+
+	esw = mlx5_devlink_eswitch_get(port->devlink);
+	if (IS_ERR(esw)) {
+		NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
+		return PTR_ERR(esw);
+	}
+
+	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+	if (!is_port_function_supported(esw, vport_num)) {
+		NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
+		return -EINVAL;
+	}
+
+	return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
+}
@@ -420,6 +420,11 @@ static void print_health_info(struct mlx5_core_dev *dev)
 	if (!ioread8(&h->synd))
 		return;
 
+	if (ioread32be(&h->fw_ver) == 0xFFFFFFFF) {
+		mlx5_log(dev, LOGLEVEL_ERR, "PCI slot is unavailable\n");
+		return;
+	}
+
 	rfr_severity = ioread8(&h->rfr_severity);
 	severity = mlx5_health_get_severity(rfr_severity);
 	mlx5_log(dev, severity, "Health issue observed, %s, severity(%d) %s:\n",
|
@ -105,7 +105,7 @@ static int mlx5i_set_coalesce(struct net_device *netdev,
|
||||
{
|
||||
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
|
||||
|
||||
return mlx5e_ethtool_set_coalesce(priv, coal);
|
||||
return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
|
||||
}
|
||||
|
||||
static int mlx5i_get_coalesce(struct net_device *netdev,
|
||||
@ -115,7 +115,7 @@ static int mlx5i_get_coalesce(struct net_device *netdev,
|
||||
{
|
||||
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
|
||||
|
||||
return mlx5e_ethtool_get_coalesce(priv, coal);
|
||||
return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
|
||||
}
|
||||
|
||||
static int mlx5i_get_ts_info(struct net_device *netdev,
|
||||
|
@ -479,7 +479,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
|
||||
pool->xa_num_irqs.max = start + size - 1;
|
||||
if (name)
|
||||
snprintf(pool->name, MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS,
|
||||
name);
|
||||
"%s", name);
|
||||
pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
|
||||
pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
|
||||
mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
|
||||
|