mlx5-fixes-2023-08-07

-----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmTRPIkACgkQSD+KveBX
 +j4ozgf/aS4GktVhN0DkTksN3K8D4QSrYYR+hiW4e7o7xz1K32qw+GRiC2r/FSGh
 XHzOGybuj7+V9TxcOb8NRSqKVtpci4MChQTrWzGutqwtcxU18SyDSo/kEmHfqkT+
 kuH+PDpDpNtcCwr+z3Cb+M22ZjpqwZnWdxfKa9rG+ur9QPTUnBY1+MGGYn6eeMnC
 DD7HiB+q7YnCNbsFHJNp4ZeUVsTWO4gD6aOUkUhXDaBkBTpKwrCUQ+dNMm2AG1/z
 3v4VdtB28BHV6o4QSop7HYk1DEnpTjZt/R+FnC6bZpIu7bDYVqw9p97EoPMvSlMv
 RgorORXPr30WpvWcH6ff2y/A0MmhIw==
 =nDa5
 -----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2023-08-07' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2023-08-07

This series provides bug fixes to the mlx5 driver.

* tag 'mlx5-fixes-2023-08-07' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Add capability check for vnic counters
  net/mlx5: Reload auxiliary devices in pci error handlers
  net/mlx5: Skip clock update work when device is in error state
  net/mlx5: LAG, Check correct bucket when modifying LAG
  net/mlx5e: Unoffload post act rule when handling FIB events
  net/mlx5: Fix devlink controller number for ECVF
  net/mlx5: Allow 0 for total host VFs
  net/mlx5: Return correct EC_VF function ID
  net/mlx5: DR, Fix wrong allocation of modify hdr pattern
  net/mlx5e: TC, Fix internal port memory leak
  net/mlx5e: Take RTNL lock when needed before calling xdp_set_features()
====================

Link: https://lore.kernel.org/r/20230807212607.50883-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit b9077ef4c1 by Jakub Kicinski <kuba@kernel.org>, 2023-08-08 16:33:18 -07:00
11 files changed, 97 insertions(+), 57 deletions(-)

drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c

@@ -2,6 +2,7 @@
 /* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */
 
 #include "reporter_vnic.h"
+#include "en_stats.h"
 #include "devlink.h"
 
 #define VNIC_ENV_GET64(vnic_env_stats, c) \
@@ -36,55 +37,72 @@ int mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
     if (err)
         return err;
 
-    err = devlink_fmsg_u64_pair_put(fmsg, "total_error_queues",
-            VNIC_ENV_GET64(&vnic, total_error_queues));
-    if (err)
-        return err;
-    err = devlink_fmsg_u64_pair_put(fmsg, "send_queue_priority_update_flow",
-            VNIC_ENV_GET64(&vnic, send_queue_priority_update_flow));
-    if (err)
-        return err;
+    if (MLX5_CAP_GEN(dev, vnic_env_queue_counters)) {
+        err = devlink_fmsg_u32_pair_put(fmsg, "total_error_queues",
+                VNIC_ENV_GET(&vnic, total_error_queues));
+        if (err)
+            return err;
+        err = devlink_fmsg_u32_pair_put(fmsg, "send_queue_priority_update_flow",
+                VNIC_ENV_GET(&vnic,
+                        send_queue_priority_update_flow));
+        if (err)
+            return err;
+    }
 
-    err = devlink_fmsg_u64_pair_put(fmsg, "comp_eq_overrun",
-            VNIC_ENV_GET64(&vnic, comp_eq_overrun));
-    if (err)
-        return err;
-    err = devlink_fmsg_u64_pair_put(fmsg, "async_eq_overrun",
-            VNIC_ENV_GET64(&vnic, async_eq_overrun));
-    if (err)
-        return err;
+    if (MLX5_CAP_GEN(dev, eq_overrun_count)) {
+        err = devlink_fmsg_u32_pair_put(fmsg, "comp_eq_overrun",
+                VNIC_ENV_GET(&vnic, comp_eq_overrun));
+        if (err)
+            return err;
+        err = devlink_fmsg_u32_pair_put(fmsg, "async_eq_overrun",
+                VNIC_ENV_GET(&vnic, async_eq_overrun));
+        if (err)
+            return err;
+    }
 
-    err = devlink_fmsg_u64_pair_put(fmsg, "cq_overrun",
-            VNIC_ENV_GET64(&vnic, cq_overrun));
-    if (err)
-        return err;
+    if (MLX5_CAP_GEN(dev, vnic_env_cq_overrun)) {
+        err = devlink_fmsg_u32_pair_put(fmsg, "cq_overrun",
+                VNIC_ENV_GET(&vnic, cq_overrun));
+        if (err)
+            return err;
+    }
 
-    err = devlink_fmsg_u64_pair_put(fmsg, "invalid_command",
-            VNIC_ENV_GET64(&vnic, invalid_command));
-    if (err)
-        return err;
+    if (MLX5_CAP_GEN(dev, invalid_command_count)) {
+        err = devlink_fmsg_u32_pair_put(fmsg, "invalid_command",
+                VNIC_ENV_GET(&vnic, invalid_command));
+        if (err)
+            return err;
+    }
 
-    err = devlink_fmsg_u64_pair_put(fmsg, "quota_exceeded_command",
-            VNIC_ENV_GET64(&vnic, quota_exceeded_command));
-    if (err)
-        return err;
+    if (MLX5_CAP_GEN(dev, quota_exceeded_count)) {
+        err = devlink_fmsg_u32_pair_put(fmsg, "quota_exceeded_command",
+                VNIC_ENV_GET(&vnic, quota_exceeded_command));
+        if (err)
+            return err;
+    }
 
-    err = devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard",
-            VNIC_ENV_GET64(&vnic, nic_receive_steering_discard));
-    if (err)
-        return err;
+    if (MLX5_CAP_GEN(dev, nic_receive_steering_discard)) {
+        err = devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard",
+                VNIC_ENV_GET64(&vnic,
+                        nic_receive_steering_discard));
+        if (err)
+            return err;
+    }
 
-    err = devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail",
-            VNIC_ENV_GET64(&vnic, generated_pkt_steering_fail));
-    if (err)
-        return err;
-    err = devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
-            VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
-    if (err)
-        return err;
+    if (MLX5_CAP_GEN(dev, vnic_env_cnt_steering_fail)) {
+        err = devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail",
+                VNIC_ENV_GET64(&vnic,
+                        generated_pkt_steering_fail));
+        if (err)
+            return err;
+        err = devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
+                VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
+        if (err)
+            return err;
+    }
 
     err = devlink_fmsg_obj_nest_end(fmsg);
     if (err)

drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c

@@ -1461,10 +1461,12 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
         attr = mlx5e_tc_get_encap_attr(flow);
         esw_attr = attr->esw_attr;
-        if (flow_flag_test(flow, SLOW))
+        if (flow_flag_test(flow, SLOW)) {
             mlx5e_tc_unoffload_from_slow_path(esw, flow);
-        else
+        } else {
             mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
+            mlx5e_tc_unoffload_flow_post_acts(flow);
+        }
 
         mlx5e_tc_detach_mod_hdr(priv, flow, attr);
         attr->modify_hdr = NULL;

drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@@ -5266,6 +5266,7 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
 static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
                           struct net_device *netdev)
 {
+    const bool take_rtnl = netdev->reg_state == NETREG_REGISTERED;
     struct mlx5e_priv *priv = netdev_priv(netdev);
     struct mlx5e_flow_steering *fs;
     int err;
@@ -5294,9 +5295,19 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
         mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
 
     mlx5e_health_create_reporters(priv);
+
+    /* If netdev is already registered (e.g. move from uplink to nic profile),
+     * RTNL lock must be held before triggering netdev notifiers.
+     */
+    if (take_rtnl)
+        rtnl_lock();
+
     /* update XDP supported features */
     mlx5e_set_xdp_feature(netdev);
 
+    if (take_rtnl)
+        rtnl_unlock();
+
     return 0;
 }

drivers/net/ethernet/mellanox/mlx5/core/en_tc.c

@@ -1943,9 +1943,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 {
     struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
     struct mlx5_flow_attr *attr = flow->attr;
-    struct mlx5_esw_flow_attr *esw_attr;
 
-    esw_attr = attr->esw_attr;
     mlx5e_put_flow_tunnel_id(flow);
     remove_unready_flow(flow);
@@ -1966,12 +1964,6 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
     mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
 
-    if (esw_attr->int_port)
-        mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);
-
-    if (esw_attr->dest_int_port)
-        mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port);
-
     if (flow_flag_test(flow, L3_TO_L2_DECAP))
         mlx5e_detach_decap(priv, flow);
@@ -4268,6 +4260,7 @@ static void
 mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
 {
     struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
+    struct mlx5_esw_flow_attr *esw_attr;
 
     if (!attr)
         return;
@@ -4285,6 +4278,18 @@ mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *a
         mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr);
     }
 
+    if (mlx5e_is_eswitch_flow(flow)) {
+        esw_attr = attr->esw_attr;
+
+        if (esw_attr->int_port)
+            mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv),
+                    esw_attr->int_port);
+
+        if (esw_attr->dest_int_port)
+            mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv),
+                    esw_attr->dest_int_port);
+    }
+
     mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
 
     free_branch_attr(flow, attr->branch_true);

drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c

@@ -60,7 +60,7 @@ static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16
     } else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
         memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
         dl_port->attrs.switch_id.id_len = ppid.id_len;
-        devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
+        devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
                 vport_num - 1, false);
     }
     return dl_port;

drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c

@@ -574,7 +574,7 @@ static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
     for (i = 0; i < ldev->ports; i++) {
         for (j = 0; j < ldev->buckets; j++) {
             idx = i * ldev->buckets + j;
-            if (ldev->v2p_map[i] == ports[i])
+            if (ldev->v2p_map[idx] == ports[idx])
                 continue;
 
             dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[ports[idx] - 1].dev,
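
The bug above is a flattened-array mix-up: v2p_map holds ldev->ports * ldev->buckets entries, so the slot for (port i, bucket j) lives at i * buckets + j, and indexing by the port number alone compares the wrong slot whenever buckets > 1. A minimal userspace sketch of the same arithmetic, with sizes and values invented for illustration (this is not the kernel code):

#include <stdio.h>

#define PORTS   2
#define BUCKETS 2

int main(void)
{
    /* Flattened [port][bucket] map, laid out like ldev->v2p_map. */
    int v2p_map[PORTS * BUCKETS] = { 1, 2, 2, 1 };

    for (int i = 0; i < PORTS; i++) {
        for (int j = 0; j < BUCKETS; j++) {
            int idx = i * BUCKETS + j;

            /* v2p_map[idx] is the slot owned by (i, j);
             * v2p_map[i] can only ever read slots 0..PORTS-1,
             * which is why later buckets went unchecked. */
            printf("port %d bucket %d: map[%d] = %d (buggy read: map[%d] = %d)\n",
                   i, j, idx, v2p_map[idx], i, v2p_map[i]);
        }
    }
    return 0;
}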

drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c

@@ -227,10 +227,15 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
     clock = container_of(timer, struct mlx5_clock, timer);
     mdev = container_of(clock, struct mlx5_core_dev, clock);
+
+    if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+        goto out;
+
     write_seqlock_irqsave(&clock->lock, flags);
     timecounter_read(&timer->tc);
     mlx5_update_clock_info_page(mdev);
     write_sequnlock_irqrestore(&clock->lock, flags);
+
+out:
     schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
 }

drivers/net/ethernet/mellanox/mlx5/core/main.c

@@ -1989,7 +1989,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
 
     mlx5_enter_error_state(dev, false);
     mlx5_error_sw_reset(dev);
-    mlx5_unload_one(dev, true);
+    mlx5_unload_one(dev, false);
     mlx5_drain_health_wq(dev);
     mlx5_pci_disable_device(dev);

drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h

@@ -361,7 +361,7 @@ static inline bool mlx5_core_is_ec_vf_vport(const struct mlx5_core_dev *dev, u16
 
 static inline int mlx5_vport_to_func_id(const struct mlx5_core_dev *dev, u16 vport, bool ec_vf_func)
 {
-    return ec_vf_func ? vport - mlx5_core_ec_vf_vport_base(dev)
+    return ec_vf_func ? vport - mlx5_core_ec_vf_vport_base(dev) + 1
                       : vport;
 }
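
The fix above is a plain off-by-one, visible from the arithmetic alone: EC VF function IDs are 1-based relative to the EC VF vport base, while the old expression produced 0-based values. Taking a purely illustrative base of 0x200, the first EC VF (vport 0x200) used to map to 0x200 - 0x200 = 0; it now maps to 0x200 - 0x200 + 1 = 1, the second to 2, and so on, so queries address the intended function.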

drivers/net/ethernet/mellanox/mlx5/core/sriov.c

@@ -285,8 +285,7 @@ static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
         host_total_vfs = MLX5_GET(query_esw_functions_out, out,
                 host_params_context.host_total_vfs);
         kvfree(out);
-        if (host_total_vfs)
-            return host_total_vfs;
+        return host_total_vfs;
     }
 
 done:

drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c

@@ -82,7 +82,7 @@ dr_ptrn_alloc_pattern(struct mlx5dr_ptrn_mgr *mgr,
     u32 chunk_size;
     u32 index;
 
-    chunk_size = ilog2(num_of_actions);
+    chunk_size = ilog2(roundup_pow_of_two(num_of_actions));
     /* HW modify action index granularity is at least 64B */
     chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8);
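
This last fix is also pure arithmetic: ilog2() truncates, so for any num_of_actions that is not a power of two the old code picked a chunk one order too small, e.g. ilog2(9) = 3, an 8-slot chunk for 9 actions, whereas ilog2(roundup_pow_of_two(9)) = ilog2(16) = 4. A standalone sketch with userspace stand-ins for the two kernel helpers (illustration only, not the kernel implementations):

#include <stdio.h>

/* Userspace stand-ins for the kernel's ilog2() and roundup_pow_of_two(). */
static unsigned int ilog2_u32(unsigned int n)               /* n > 0 */
{
    return 31 - __builtin_clz(n);                           /* floor(log2(n)) */
}

static unsigned int roundup_pow_of_two_u32(unsigned int n)  /* n > 1 */
{
    return 1u << (32 - __builtin_clz(n - 1));
}

int main(void)
{
    for (unsigned int n = 2; n <= 10; n++) {
        unsigned int old_order = ilog2_u32(n);                        /* truncates */
        unsigned int new_order = ilog2_u32(roundup_pow_of_two_u32(n)); /* rounds up */

        printf("%2u actions: old chunk %u slots, fixed chunk %u slots\n",
               n, 1u << old_order, 1u << new_order);
    }
    return 0;
}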