net/mlx5e: Add LAG warning if bond slave is not lag master
LAG offload can't be enabled if the enslaved PF is not a lag master,
which is indicated by an HCA capability bit. That capability is cleared
if more than 64 VFs are configured for the PF.

Previously, a data structure was created to store lag info, including
the PFs to be enslaved, and a handler was then registered for the
netdev notifier. However, this initialization was skipped if the PF was
not a lag master, so the PF couldn't handle the CHANGEUPPER event from
the upper bond device. Even worse, the PF was enslaved silently, and
LAG offload was never activated.

Fix this by registering the netdev notifier for PFs which are not lag
masters as well. When a CHANGEUPPER event is received, and both
physical ports (and only them) on the same NIC are about to be
enslaved, a warning is returned so the user knows about it.

Signed-off-by: Jianbo Liu <jianbol@mellanox.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: Raed Salem <raeds@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 1a3c911483
commit 9b412cc35f
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -271,7 +271,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 	bool do_bond, roce_lag;
 	int err;
 
-	if (!dev0 || !dev1)
+	if (!mlx5_lag_is_ready(ldev))
 		return;
 
 	spin_lock(&lag_lock);
@@ -394,6 +394,12 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
 	 */
 	is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;
 
+	if (!mlx5_lag_is_ready(ldev) && is_in_lag) {
+		NL_SET_ERR_MSG_MOD(info->info.extack,
+				   "Can't activate LAG offload, PF is configured with more than 64 VFs");
+		return 0;
+	}
+
 	/* Lag mode must be activebackup or hash. */
 	mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
 			 tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;
@@ -450,6 +456,10 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
 		return NOTIFY_DONE;
 
 	ldev = container_of(this, struct mlx5_lag, nb);
+
+	if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE)
+		return NOTIFY_DONE;
+
 	tracker = ldev->tracker;
 
 	switch (event) {
@@ -498,14 +508,14 @@ static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
 	kfree(ldev);
 }
 
-static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
-				struct mlx5_core_dev *dev,
-				struct net_device *netdev)
+static int mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
+			       struct mlx5_core_dev *dev,
+			       struct net_device *netdev)
 {
 	unsigned int fn = PCI_FUNC(dev->pdev->devfn);
 
 	if (fn >= MLX5_MAX_PORTS)
-		return;
+		return -EPERM;
 
 	spin_lock(&lag_lock);
 	ldev->pf[fn].dev = dev;
@@ -516,6 +526,8 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
 	dev->priv.lag = ldev;
 
 	spin_unlock(&lag_lock);
+
+	return fn;
 }
 
 static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
@@ -542,11 +554,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
 {
 	struct mlx5_lag *ldev = NULL;
 	struct mlx5_core_dev *tmp_dev;
-	int err;
+	int i, err;
 
-	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
-	    !MLX5_CAP_GEN(dev, lag_master) ||
-	    (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
 		return;
 
 	tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -561,7 +571,18 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
 		}
 	}
 
-	mlx5_lag_dev_add_pf(ldev, dev, netdev);
+	if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
+		return;
+
+	for (i = 0; i < MLX5_MAX_PORTS; i++) {
+		tmp_dev = ldev->pf[i].dev;
+		if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
+		    MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+			break;
+	}
+
+	if (i >= MLX5_MAX_PORTS)
+		ldev->flags |= MLX5_LAG_FLAG_READY;
 
 	if (!ldev->nb.notifier_call) {
 		ldev->nb.notifier_call = mlx5_lag_netdev_event;
@@ -592,6 +613,8 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
 
 	mlx5_lag_dev_remove_pf(ldev, dev);
 
+	ldev->flags &= ~MLX5_LAG_FLAG_READY;
+
 	for (i = 0; i < MLX5_MAX_PORTS; i++)
 		if (ldev->pf[i].dev)
 			break;
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -16,6 +16,7 @@ enum {
 	MLX5_LAG_FLAG_ROCE = 1 << 0,
 	MLX5_LAG_FLAG_SRIOV = 1 << 1,
 	MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
+	MLX5_LAG_FLAG_READY = 1 << 3,
 };
 
 #define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\
@@ -59,6 +60,12 @@ __mlx5_lag_is_active(struct mlx5_lag *ldev)
 	return !!(ldev->flags & MLX5_LAG_MODE_FLAGS);
}
 
+static inline bool
+mlx5_lag_is_ready(struct mlx5_lag *ldev)
+{
+	return ldev->flags & MLX5_LAG_FLAG_READY;
+}
+
 void mlx5_modify_lag(struct mlx5_lag *ldev,
 		     struct lag_tracker *tracker);
 int mlx5_activate_lag(struct mlx5_lag *ldev,
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -11,7 +11,7 @@
 
 static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
 {
-	if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
+	if (!mlx5_lag_is_ready(ldev))
 		return false;
 
 	return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,