Eswitch has two users: IB and ETH. They both register representors when the mlx5 interface is added, and unregister the representors when the mlx5 interface is removed. Ideally, each driver should only deal with the entities which are unique to itself. However, the current IB and ETH drivers have to perform the following eswitch operations: 1. When registering, specify how many vports to register. This number is the same for both drivers, as it is the total number of available vports. 2. When unregistering, specify the number of registered vports to unregister, and also unload the representors which are already loaded. It is unnecessary for the eswitch driver to hand out control of the above operations to its individual driver users, as they are not unique to each driver. Instead, such operations should be centralized in the eswitch driver. This consolidates the eswitch control flow and simplifies the IB and ETH drivers. This patch doesn't change any functionality. Signed-off-by: Bodong Wang <bodong@mellanox.com> Reviewed-by: Or Gerlitz <ogerlitz@mellanox.com> Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
160 lines
4.0 KiB
C
160 lines
4.0 KiB
C
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
|
|
/*
|
|
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
|
|
*/
|
|
|
|
#include <linux/mlx5/vport.h>
|
|
#include "ib_rep.h"
|
|
#include "srq.h"
|
|
|
|
/* Init/cleanup stage table used to bring up an IB device for a VF
 * (non-uplink) eswitch representor; the uplink representor uses the
 * separately defined uplink_rep_profile instead (see
 * mlx5_ib_vport_rep_load()).  Each STAGE_CREATE() entry pairs a stage
 * identifier with its init and cleanup callbacks; a NULL callback means
 * that stage needs no work in that direction.  Presumably init runs
 * top-to-bottom and cleanup in reverse -- see __mlx5_ib_add() /
 * __mlx5_ib_remove() for the actual iteration order.
 */
static const struct mlx5_ib_profile vf_rep_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
		     mlx5_ib_stage_rep_flow_db_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_rep_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_stage_rep_roce_init,
		     mlx5_ib_stage_rep_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
		     mlx5_init_srq_table,
		     mlx5_cleanup_srq_table),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_stage_dev_res_init,
		     mlx5_ib_stage_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_stage_counters_init,
		     mlx5_ib_stage_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
};
|
|
|
|
/* Eswitch "load" callback: allocate and bring up an IB device for @rep.
 *
 * The uplink vport gets the (externally defined) uplink_rep_profile;
 * every other vport gets the local vf_rep_profile.  On success the new
 * device is stashed in rep->rep_if[REP_IB].priv so that
 * mlx5_ib_vport_rep_unload() can find it later.
 *
 * Returns 0 on success, -ENOMEM if allocation fails, or -EINVAL if the
 * profile stages fail to initialize.
 */
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	const struct mlx5_ib_profile *profile;
	struct mlx5_ib_dev *ibdev;

	if (rep->vport == MLX5_VPORT_UPLINK)
		profile = &uplink_rep_profile;
	else
		profile = &vf_rep_profile;

	ibdev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*ibdev));
	if (!ibdev)
		return -ENOMEM;

	ibdev->rep = rep;
	ibdev->mdev = dev;
	ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
			       MLX5_CAP_GEN(dev, num_vhca_ports));
	/* NOTE(review): ibdev is not explicitly freed on this error path.
	 * Confirm that __mlx5_ib_add() releases the device itself when it
	 * fails (e.g. via __mlx5_ib_remove()); otherwise this leaks.
	 */
	if (!__mlx5_ib_add(ibdev, profile))
		return -EINVAL;

	rep->rep_if[REP_IB].priv = ibdev;

	return 0;
}
|
|
|
|
/* Eswitch "unload" callback: tear down and free the IB device previously
 * created for @rep by mlx5_ib_vport_rep_load().  Safe to call for a rep
 * that never loaded successfully (priv == NULL).
 */
static void
mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5_ib_dev *dev;

	/* Nothing to do if no IB device was attached to this rep. */
	if (!rep->rep_if[REP_IB].priv)
		return;

	dev = mlx5_ib_rep_to_dev(rep);
	/* Run all profile cleanup stages, then drop the back-pointer
	 * before releasing the device memory.
	 */
	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
	rep->rep_if[REP_IB].priv = NULL;
	ib_dealloc_device(&dev->ib_dev);
}
|
|
|
|
/* Eswitch "get_proto_dev" callback: return the IB device attached to
 * @rep (may be NULL if the rep was never loaded).
 */
static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	return mlx5_ib_rep_to_dev(rep);
}
|
|
|
|
void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
|
|
{
|
|
struct mlx5_eswitch *esw = mdev->priv.eswitch;
|
|
struct mlx5_eswitch_rep_if rep_if = {};
|
|
|
|
rep_if.load = mlx5_ib_vport_rep_load;
|
|
rep_if.unload = mlx5_ib_vport_rep_unload;
|
|
rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
|
|
|
|
mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_IB);
|
|
}
|
|
|
|
void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
|
|
{
|
|
struct mlx5_eswitch *esw = mdev->priv.eswitch;
|
|
|
|
mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
|
|
}
|
|
|
|
/* Thin wrapper exposing the current eswitch mode to the IB driver. */
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
{
	return mlx5_eswitch_mode(esw);
}
|
|
|
|
/* Look up the IB device registered for the representor at @vport_index
 * (REP_IB slot); may return NULL if none is loaded.
 */
struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
					  int vport_index)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_IB);
}
|
|
|
|
/* Look up the net_device registered by the ETH driver for the
 * representor at @vport_index (REP_ETH slot); may return NULL.
 */
struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
					  int vport_index)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_ETH);
}
|
|
|
|
/* Return the IB device attached to the uplink representor, if any. */
struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
{
	return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB);
}
|
|
|
|
/* Thin wrapper returning the eswitch representor for @vport. */
struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, int vport)
{
	return mlx5_eswitch_vport_rep(esw, vport);
}
|
|
|
|
int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
|
|
struct mlx5_ib_sq *sq)
|
|
{
|
|
struct mlx5_flow_handle *flow_rule;
|
|
struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
|
|
|
|
if (!dev->rep)
|
|
return 0;
|
|
|
|
flow_rule =
|
|
mlx5_eswitch_add_send_to_vport_rule(esw,
|
|
dev->rep->vport,
|
|
sq->base.mqp.qpn);
|
|
if (IS_ERR(flow_rule))
|
|
return PTR_ERR(flow_rule);
|
|
sq->flow_rule = flow_rule;
|
|
|
|
return 0;
|
|
}
|