mlx5-updates-2021-12-21
1) From Shay Drory: Devlink user knobs to control device's EQ size

This series provides knobs which will enable users to minimize memory
consumption of mlx5 Functions (PF/VF/SF). mlx5 exposes two new generic
devlink params for EQ size configuration and uses devlink generic param
max_macs.

LINK: https://lore.kernel.org/netdev/20211208141722.13646-1-shayd@nvidia.com/

2) From Tariq and Lama, allocate software channel objects and statistics
of a mlx5 netdevice private data dynamically upon first demand to save
on memory.

-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmHClsoACgkQSD+KveBX
+j5d6gf+NKH8mQd6Aa/Gt4Y2DtS7GzN+dPD+2MokuT2YSWU8kVGlMnC01MTE2V2s
jiOUC6ZEbayx2ORzd58XlcfAMEZz2WH8VGLXTdmM3niv13D7AueSsUP5SVK7Oamk
h0bwzOV8CE1Ru5s3Q3zPaLBqTWN+TmyG42HNwYyD8GZT7O5q8iBfor97N2KN5U5u
rhxzFcD2jfhtYxACkjea6RTN9CTM7l9FS4+DIjhu53PAOBaOZLj8NKSYUqP6I17L
ITJDd9za4Nq/YiB2yU4UVNkDEXsuXWoqnTNsL1oEUPForEomJsnikHV9ywzyKVMn
SbWdZOr5C14UBH9wyyauEBBv5oLB7w==
=cotW
-----END PGP SIGNATURE-----

Merge tag 'mlx5-updates-2021-12-21' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-12-21

1) From Shay Drory: Devlink user knobs to control device's EQ size

This series provides knobs which will enable users to minimize memory
consumption of mlx5 Functions (PF/VF/SF). mlx5 exposes two new generic
devlink params for EQ size configuration and uses devlink generic param
max_macs.

LINK: https://lore.kernel.org/netdev/20211208141722.13646-1-shayd@nvidia.com/

2) From Tariq and Lama, allocate software channel objects and statistics
of a mlx5 netdevice private data dynamically upon first demand to save
on memory.

* tag 'mlx5-updates-2021-12-21' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Take packet_merge params directly from the RX res struct
  net/mlx5e: Allocate per-channel stats dynamically at first usage
  net/mlx5e: Use dynamic per-channel allocations in stats
  net/mlx5e: Allow profile-specific limitation on max num of channels
  net/mlx5e: Save memory by using dynamic allocation in netdev priv
  net/mlx5e: Add profile indications for PTP and QOS HTB features
  net/mlx5e: Use bitmap field for profile features
  net/mlx5: Remove the repeated declaration
  net/mlx5: Let user configure max_macs generic param
  devlink: Clarifies max_macs generic devlink param
  net/mlx5: Let user configure event_eq_size param
  devlink: Add new "event_eq_size" generic device param
  net/mlx5: Let user configure io_eq_size param
  devlink: Add new "io_eq_size" generic device param
====================

Link: https://lore.kernel.org/r/20211222031604.14540-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
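For context, a minimal sketch of how such a driverinit devlink knob is consumed on the driver side, mirroring the pattern this series applies in mlx5 (see async_eq_depth_devlink_param_get in the diff below); the foo_* names and the default value here are hypothetical placeholders, not part of the series:

/* Sketch only: read the generic driverinit "event_eq_size" param at
 * device init/reload time, fall back to the driver default otherwise.
 */
#include <net/devlink.h>

#define FOO_DEFAULT_EVENT_EQ_DEPTH 4096	/* hypothetical driver default */

static u32 foo_event_eq_depth_get(struct devlink *devlink)
{
	union devlink_param_value val;
	int err;

	/* driverinit params only take effect on driver (re)initialization */
	err = devlink_param_driverinit_value_get(devlink,
						 DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
						 &val);
	if (!err)
		return val.vu32;

	/* param not registered or not set: keep the compiled-in default */
	return FOO_DEFAULT_EVENT_EQ_DEPTH;
}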
commit 5de24da1b3
@@ -118,8 +118,10 @@ own name.
errors.
* - ``max_macs``
- u32
- Specifies the maximum number of MAC addresses per ethernet port of
this device.
- Typically macvlan, vlan net devices mac are also programmed in their
parent netdevice's Function rx filter. This parameter limit the
maximum number of unicast mac address filters to receive traffic from
per ethernet port of this device.
* - ``region_snapshot_enable``
- Boolean
- Enable capture of ``devlink-region`` snapshots.
@@ -129,3 +131,9 @@ own name.
will NACK any attempt of other host to reset the device. This parameter
is useful for setups where a device is shared by different hosts, such
as multi-host setup.
* - ``io_eq_size``
- u32
- Control the size of I/O completion EQs.
* - ``event_eq_size``
- u32
- Control the size of asynchronous control events EQ.
@@ -14,8 +14,18 @@ Parameters

* - Name
- Mode
- Validation
* - ``enable_roce``
- driverinit
* - ``io_eq_size``
- driverinit
- The range is between 64 and 4096.
* - ``event_eq_size``
- driverinit
- The range is between 64 and 4096.
* - ``max_macs``
- driverinit
- The range is between 1 and 2^31. Only power of 2 values are supported.

The ``mlx5`` driver also implements the following driver-specific
parameters.
@@ -546,6 +546,13 @@ static int mlx5_devlink_enable_remote_dev_reset_get(struct devlink *devlink, u32
return 0;
}

static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
{
return (val.vu16 >= 64 && val.vu16 <= 4096) ? 0 : -EINVAL;
}

static const struct devlink_param mlx5_devlink_params[] = {
DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
"flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
@@ -570,6 +577,10 @@ static const struct devlink_param mlx5_devlink_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
mlx5_devlink_enable_remote_dev_reset_get,
mlx5_devlink_enable_remote_dev_reset_set, NULL),
DEVLINK_PARAM_GENERIC(IO_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_eq_depth_validate),
DEVLINK_PARAM_GENERIC(EVENT_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_eq_depth_validate),
};

static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
@@ -608,6 +619,16 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
value);
}
#endif

value.vu32 = MLX5_COMP_EQ_SIZE;
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
value);

value.vu32 = MLX5_NUM_ASYNC_EQE;
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
value);
}

static const struct devlink_param enable_eth_param =
@@ -752,6 +773,66 @@ static void mlx5_devlink_auxdev_params_unregister(struct devlink *devlink)
mlx5_devlink_eth_param_unregister(devlink);
}

static int mlx5_devlink_max_uc_list_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);

if (val.vu32 == 0) {
NL_SET_ERR_MSG_MOD(extack, "max_macs value must be greater than 0");
return -EINVAL;
}

if (!is_power_of_2(val.vu32)) {
NL_SET_ERR_MSG_MOD(extack, "Only power of 2 values are supported for max_macs");
return -EINVAL;
}

if (ilog2(val.vu32) >
MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list)) {
NL_SET_ERR_MSG_MOD(extack, "max_macs value is out of the supported range");
return -EINVAL;
}

return 0;
}

static const struct devlink_param max_uc_list_param =
DEVLINK_PARAM_GENERIC(MAX_MACS, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_max_uc_list_validate);

static int mlx5_devlink_max_uc_list_param_register(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
int err;

if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
return 0;

err = devlink_param_register(devlink, &max_uc_list_param);
if (err)
return err;

value.vu32 = 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list);
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
value);
return 0;
}

static void
mlx5_devlink_max_uc_list_param_unregister(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);

if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
return;

devlink_param_unregister(devlink, &max_uc_list_param);
}

#define MLX5_TRAP_DROP(_id, _group_id) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
@@ -811,6 +892,10 @@ int mlx5_devlink_register(struct devlink *devlink)
if (err)
goto auxdev_reg_err;

err = mlx5_devlink_max_uc_list_param_register(devlink);
if (err)
goto max_uc_list_err;

err = mlx5_devlink_traps_register(devlink);
if (err)
goto traps_reg_err;
@@ -821,6 +906,8 @@ int mlx5_devlink_register(struct devlink *devlink)
return 0;

traps_reg_err:
mlx5_devlink_max_uc_list_param_unregister(devlink);
max_uc_list_err:
mlx5_devlink_auxdev_params_unregister(devlink);
auxdev_reg_err:
devlink_params_unregister(devlink, mlx5_devlink_params,
@@ -831,6 +918,7 @@ auxdev_reg_err:
void mlx5_devlink_unregister(struct devlink *devlink)
{
mlx5_devlink_traps_unregister(devlink);
mlx5_devlink_max_uc_list_param_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
@@ -145,7 +145,6 @@ struct page_pool;

#define MLX5E_MIN_NUM_CHANNELS 0x1
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
@@ -875,10 +874,8 @@ struct mlx5e_trap;

struct mlx5e_priv {
/* priv data path fields - start */
/* +1 for port ptp ts */
struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC +
MLX5E_QOS_MAX_LEAF_NODES];
int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
struct mlx5e_txqsq **txq2sq;
int **channel_tc2realtxq;
int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx_dp dcbx_dp;
@@ -893,7 +890,7 @@ struct mlx5e_priv {
struct mlx5e_channels channels;
u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
struct mlx5e_rx_res *rx_res;
u32 tx_rates[MLX5E_MAX_NUM_SQS];
u32 *tx_rates;

struct mlx5e_flow_steering fs;

@@ -909,7 +906,7 @@ struct mlx5e_priv {
struct net_device *netdev;
struct mlx5e_trap *en_trap;
struct mlx5e_stats stats;
struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
struct mlx5e_channel_stats **channel_stats;
struct mlx5e_channel_stats trap_stats;
struct mlx5e_ptp_stats ptp_stats;
u16 stats_nch;
@@ -956,6 +953,12 @@ struct mlx5e_rx_handlers {

extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;

enum mlx5e_profile_feature {
MLX5E_PROFILE_FEATURE_PTP_RX,
MLX5E_PROFILE_FEATURE_PTP_TX,
MLX5E_PROFILE_FEATURE_QOS_HTB,
};

struct mlx5e_profile {
int (*init)(struct mlx5_core_dev *mdev,
struct net_device *netdev);
@@ -969,14 +972,18 @@ struct mlx5e_profile {
int (*update_rx)(struct mlx5e_priv *priv);
void (*update_stats)(struct mlx5e_priv *priv);
void (*update_carrier)(struct mlx5e_priv *priv);
int (*max_nch_limit)(struct mlx5_core_dev *mdev);
unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
mlx5e_stats_grp_t *stats_grps;
const struct mlx5e_rx_handlers *rx_handlers;
int max_tc;
u8 rq_groups;
bool rx_ptp_support;
u32 features;
};

#define mlx5e_profile_feature_cap(profile, feature) \
((profile)->features & BIT(MLX5E_PROFILE_FEATURE_##feature))

void mlx5e_build_ptys2ethtool_map(void);

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
@@ -1188,8 +1195,7 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
struct mlx5_core_dev *mdev);
void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
struct net_device *
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
unsigned int txqs, unsigned int rxqs);
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
@@ -20,7 +20,7 @@ mlx5e_hv_vhca_fill_ring_stats(struct mlx5e_priv *priv, int ch,
struct mlx5e_channel_stats *stats;
int tc;

stats = &priv->channel_stats[ch];
stats = priv->channel_stats[ch];
data->rx_packets = stats->rq.packets;
data->rx_bytes = stats->rq.bytes;
@@ -768,7 +768,7 @@ int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
{
struct mlx5e_ptp_fs *ptp_fs;

if (!priv->profile->rx_ptp_support)
if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return 0;

ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
@@ -783,7 +783,7 @@ void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
{
struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;

if (!priv->profile->rx_ptp_support)
if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return;

mlx5e_ptp_rx_unset_fs(priv);
@@ -794,7 +794,7 @@ int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
{
struct mlx5e_ptp *c = priv->channels.ptp;

if (!priv->profile->rx_ptp_support)
if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return 0;

if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
@@ -37,7 +37,6 @@ struct mlx5e_rx_res {
/* API for rx_res_rss_* */

static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
@@ -52,7 +51,7 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
return -ENOMEM;

err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn,
init_pkt_merge_param);
&res->pkt_merge_param);
if (err)
goto err_rss_free;

@@ -277,8 +276,7 @@ struct mlx5e_rx_res *mlx5e_rx_res_alloc(void)
return kvzalloc(sizeof(struct mlx5e_rx_res), GFP_KERNEL);
}

static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
const struct mlx5e_packet_merge_param *init_pkt_merge_param)
static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
struct mlx5e_tir_builder *builder;
@@ -309,7 +307,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
inner_ft_support);
mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
mlx5e_tir_builder_build_direct(builder);

err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
@@ -339,7 +337,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
inner_ft_support);
mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
mlx5e_tir_builder_build_direct(builder);

err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true);
@@ -454,11 +452,11 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
res->pkt_merge_param = *init_pkt_merge_param;
init_rwsem(&res->pkt_merge_param_sem);

err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
err = mlx5e_rx_res_rss_init_def(res, init_nch);
if (err)
goto err_out;

err = mlx5e_rx_res_channels_init(res, init_pkt_merge_param);
err = mlx5e_rx_res_channels_init(res);
if (err)
goto err_rss_destroy;
@@ -67,7 +67,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
rq->xsk_pool = pool;
rq->stats = &c->priv->channel_stats[c->ix].xskrq;
rq->stats = &c->priv->channel_stats[c->ix]->xskrq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
rq_xdp_ix = c->ix + params->num_channels * MLX5E_RQ_GROUP_XSK;
err = mlx5e_rq_set_handlers(rq, params, xsk);
@@ -611,7 +611,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
priv_rx->rxq = rxq;
priv_rx->sk = sk;

priv_rx->rq_stats = &priv->channel_stats[rxq].rq;
priv_rx->rq_stats = &priv->channel_stats[rxq]->rq;
priv_rx->sw_stats = &priv->tls->sw_stats;
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
@@ -556,7 +556,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
priv->channel_stats[arfs_rule->rxq].rq.arfs_err++;
priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
mlx5e_dbg(HW, priv,
"%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n",
__func__, arfs_rule->filter_id, arfs_rule->rxq,
@@ -1934,7 +1934,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
if (curr_val == new_val)
return 0;

if (new_val && !priv->profile->rx_ptp_support && rx_filter) {
if (new_val && !mlx5e_profile_feature_cap(priv->profile, PTP_RX) && rx_filter) {
netdev_err(priv->netdev,
"Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
return -EINVAL;
@@ -479,7 +479,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
rq->stats = &c->priv->channel_stats[c->ix].rq;
rq->stats = &c->priv->channel_stats[c->ix]->rq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
err = mlx5e_rq_set_handlers(rq, params, NULL);
if (err)
@@ -1161,10 +1161,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->xsk_pool = xsk_pool;

sq->stats = sq->xsk_pool ?
&c->priv->channel_stats[c->ix].xsksq :
&c->priv->channel_stats[c->ix]->xsksq :
is_redirect ?
&c->priv->channel_stats[c->ix].xdpsq :
&c->priv->channel_stats[c->ix].rq_xdpsq;
&c->priv->channel_stats[c->ix]->xdpsq :
&c->priv->channel_stats[c->ix]->rq_xdpsq;

param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1928,7 +1928,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
params, &cparam->txq_sq, &c->sq[tc], tc,
qos_queue_group_id,
&c->priv->channel_stats[c->ix].sq[tc]);
&c->priv->channel_stats[c->ix]->sq[tc]);
if (err)
goto err_close_sqs;
}
@@ -2176,6 +2176,30 @@ static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
}

static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
{
if (ix > priv->stats_nch) {
netdev_warn(priv->netdev, "Unexpected channel stats index %d > %d\n", ix,
priv->stats_nch);
return -EINVAL;
}

if (priv->channel_stats[ix])
return 0;

/* Asymmetric dynamic memory allocation.
* Freed in mlx5e_priv_arrays_free, not on channel closure.
*/
mlx5e_dbg(DRV, priv, "Creating channel stats %d\n", ix);
priv->channel_stats[ix] = kvzalloc_node(sizeof(**priv->channel_stats),
GFP_KERNEL, cpu_to_node(cpu));
if (!priv->channel_stats[ix])
return -ENOMEM;
priv->stats_nch++;

return 0;
}

static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
@@ -2193,6 +2217,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
return err;

err = mlx5e_channel_stats_alloc(priv, ix, cpu);
if (err)
return err;

c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
@@ -2207,7 +2235,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
c->stats = &priv->channel_stats[ix]->ch;
c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);

@@ -3371,7 +3399,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
int i;

for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i];
struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
int j;
@@ -4038,7 +4066,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
goto err_unlock;
}

if (!priv->profile->rx_ptp_support)
if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
err = mlx5e_hwstamp_config_no_ptp_rx(priv,
config.rx_filter != HWTSTAMP_FILTER_NONE);
else
@@ -5093,9 +5121,23 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_nic_stats_grps,
.stats_grps_num = mlx5e_nic_stats_grps_num,
.rx_ptp_support = true,
.features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
BIT(MLX5E_PROFILE_FEATURE_PTP_TX) |
BIT(MLX5E_PROFILE_FEATURE_QOS_HTB),
};

static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
const struct mlx5e_profile *profile)
{
int nch;

nch = mlx5e_get_max_num_channels(mdev);

if (profile->max_nch_limit)
nch = min_t(int, nch, profile->max_nch_limit(mdev));
return nch;
}

static unsigned int
mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
const struct mlx5e_profile *profile)
@@ -5104,7 +5146,7 @@ mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
unsigned int max_nch, tmp;

/* core resources */
max_nch = mlx5e_get_max_num_channels(mdev);
max_nch = mlx5e_profile_max_num_channels(mdev, profile);

/* netdev rx queues */
tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
@@ -5128,12 +5170,17 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
struct net_device *netdev,
struct mlx5_core_dev *mdev)
{
int nch, num_txqs, node, i;

num_txqs = netdev->num_tx_queues;
nch = mlx5e_calc_max_nch(mdev, netdev, profile);
node = dev_to_node(mlx5_core_dma_dev(mdev));

/* priv init */
priv->mdev = mdev;
priv->netdev = netdev;
priv->msglevel = MLX5E_MSG_LEVEL;
priv->max_nch = mlx5e_calc_max_nch(mdev, netdev, profile);
priv->stats_nch = priv->max_nch;
priv->max_nch = nch;
priv->max_opened_tc = 1;

if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
@@ -5150,11 +5197,46 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (!priv->wq)
goto err_free_cpumask;

priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node);
if (!priv->txq2sq)
goto err_destroy_workqueue;

priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node);
if (!priv->tx_rates)
goto err_free_txq2sq;

priv->channel_tc2realtxq =
kcalloc_node(nch, sizeof(*priv->channel_tc2realtxq), GFP_KERNEL, node);
if (!priv->channel_tc2realtxq)
goto err_free_tx_rates;

for (i = 0; i < nch; i++) {
priv->channel_tc2realtxq[i] =
kcalloc_node(profile->max_tc, sizeof(**priv->channel_tc2realtxq),
GFP_KERNEL, node);
if (!priv->channel_tc2realtxq[i])
goto err_free_channel_tc2realtxq;
}

priv->channel_stats =
kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
if (!priv->channel_stats)
goto err_free_channel_tc2realtxq;

return 0;

err_free_channel_tc2realtxq:
while (--i >= 0)
kfree(priv->channel_tc2realtxq[i]);
kfree(priv->channel_tc2realtxq);
err_free_tx_rates:
kfree(priv->tx_rates);
err_free_txq2sq:
kfree(priv->txq2sq);
err_destroy_workqueue:
destroy_workqueue(priv->wq);
err_free_cpumask:
free_cpumask_var(priv->scratchpad.cpumask);

return -ENOMEM;
}

@@ -5166,6 +5248,14 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
if (!priv->mdev)
return;

for (i = 0; i < priv->stats_nch; i++)
kvfree(priv->channel_stats[i]);
kfree(priv->channel_stats);
for (i = 0; i < priv->max_nch; i++)
kfree(priv->channel_tc2realtxq[i]);
kfree(priv->channel_tc2realtxq);
kfree(priv->tx_rates);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
free_cpumask_var(priv->scratchpad.cpumask);

@@ -5181,13 +5271,44 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
memset(priv, 0, sizeof(*priv));
}

static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
const struct mlx5e_profile *profile)
{
unsigned int nch, ptp_txqs, qos_txqs;

nch = mlx5e_profile_max_num_channels(mdev, profile);

ptp_txqs = MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) &&
mlx5e_profile_feature_cap(profile, PTP_TX) ?
profile->max_tc : 0;

qos_txqs = mlx5_qos_is_supported(mdev) &&
mlx5e_profile_feature_cap(profile, QOS_HTB) ?
mlx5e_qos_max_leaf_nodes(mdev) : 0;

return nch * profile->max_tc + ptp_txqs + qos_txqs;
}

static unsigned int mlx5e_get_max_num_rxqs(struct mlx5_core_dev *mdev,
const struct mlx5e_profile *profile)
{
unsigned int nch;

nch = mlx5e_profile_max_num_channels(mdev, profile);

return nch * profile->rq_groups;
}

struct net_device *
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
unsigned int txqs, unsigned int rxqs)
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile)
{
struct net_device *netdev;
unsigned int txqs, rxqs;
int err;

txqs = mlx5e_get_max_num_txqs(mdev, profile);
rxqs = mlx5e_get_max_num_rxqs(mdev, profile);

netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), txqs, rxqs);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
@@ -5432,22 +5553,10 @@ static int mlx5e_probe(struct auxiliary_device *adev,
struct mlx5_core_dev *mdev = edev->mdev;
struct net_device *netdev;
pm_message_t state = {};
unsigned int txqs, rxqs, ptp_txqs = 0;
struct mlx5e_priv *priv;
int qos_sqs = 0;
int err;
int nch;

if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
ptp_txqs = profile->max_tc;

if (mlx5_qos_is_supported(mdev))
qos_sqs = mlx5e_qos_max_leaf_nodes(mdev);

nch = mlx5e_get_max_num_channels(mdev);
txqs = nch * profile->max_tc + ptp_txqs + qos_sqs;
rxqs = nch * profile->rq_groups;
netdev = mlx5e_create_netdev(mdev, profile, txqs, rxqs);
netdev = mlx5e_create_netdev(mdev, profile);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
return -ENOMEM;
@@ -591,6 +591,12 @@ bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}

static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
{
return (1 << MLX5_CAP_GEN(mdev, log_max_tir)) /
mlx5_eswitch_get_total_vports(mdev);
}

static void mlx5e_build_rep_params(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1113,7 +1119,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_rep_stats_grps,
.stats_grps_num = mlx5e_rep_stats_grps_num,
.rx_ptp_support = false,
.max_nch_limit = mlx5e_rep_max_nch_limit,
};

static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
@@ -1134,7 +1140,6 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_ul_rep_stats_grps,
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
.rx_ptp_support = false,
};

/* e-Switch vport representors */
@@ -1185,14 +1190,10 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
struct devlink_port *dl_port;
struct net_device *netdev;
struct mlx5e_priv *priv;
unsigned int txqs, rxqs;
int nch, err;
int err;

profile = &mlx5e_rep_profile;
nch = mlx5e_get_max_num_channels(dev);
txqs = nch * profile->max_tc;
rxqs = nch * profile->rq_groups;
netdev = mlx5e_create_netdev(dev, profile, txqs, rxqs);
netdev = mlx5e_create_netdev(dev, profile);
if (!netdev) {
mlx5_core_warn(dev,
"Failed to create representor netdev for vport %d\n",
@@ -2189,7 +2189,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,

priv = mlx5i_epriv(netdev);
tstamp = &priv->tstamp;
stats = &priv->channel_stats[rq->ix].rq;
stats = rq->stats;

flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
g = (flags_rqpn >> 28) & 3;
@@ -463,7 +463,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)

for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats =
&priv->channel_stats[i];
priv->channel_stats[i];
int j;

mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
@@ -2197,21 +2197,21 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_CH_STATS; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
ch_stats_desc, j);

for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
rq_stats_desc, j);
for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
xskrq_stats_desc, j);
for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
rq_xdpsq_stats_desc, j);
}

@@ -2219,17 +2219,17 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
sq_stats_desc, j);

for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
xsksq_stats_desc, j);
for (j = 0; j < NUM_XDPSQ_STATS; j++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
xdpsq_stats_desc, j);
}
@@ -19,6 +19,7 @@
#include "lib/clock.h"
#include "diag/fw_tracer.h"
#include "mlx5_irq.h"
#include "devlink.h"

enum {
MLX5_EQE_OWNER_INIT_VAL = 0x1,
@@ -622,6 +623,20 @@ static void cleanup_async_eq(struct mlx5_core_dev *dev,
name, err);
}

static u16 async_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
{
struct devlink *devlink = priv_to_devlink(dev);
union devlink_param_value val;
int err;

err = devlink_param_driverinit_value_get(devlink,
DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
&val);
if (!err)
return val.vu32;
mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
return MLX5_NUM_ASYNC_EQE;
}
static int create_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -646,7 +661,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)

param = (struct mlx5_eq_param) {
.irq_index = MLX5_IRQ_EQ_CTRL,
.nent = MLX5_NUM_ASYNC_EQE,
.nent = async_eq_depth_devlink_param_get(dev),
};

gather_async_events_mask(dev, param.mask);
@@ -796,6 +811,21 @@ static void destroy_comp_eqs(struct mlx5_core_dev *dev)
}
}

static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
{
struct devlink *devlink = priv_to_devlink(dev);
union devlink_param_value val;
int err;

err = devlink_param_driverinit_value_get(devlink,
DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
&val);
if (!err)
return val.vu32;
mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
return MLX5_COMP_EQ_SIZE;
}

static int create_comp_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -807,7 +837,7 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)

INIT_LIST_HEAD(&table->comp_eqs_list);
ncomp_eqs = table->num_comp_eqs;
nent = MLX5_COMP_EQ_SIZE;
nent = comp_eq_depth_devlink_param_get(dev);
for (i = 0; i < ncomp_eqs; i++) {
struct mlx5_eq_param param = {};
int vecidx = i;
@@ -343,9 +343,6 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
@@ -117,7 +117,7 @@ static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
struct mlx5e_channel_stats *channel_stats;
struct mlx5e_rq_stats *rq_stats;

channel_stats = &priv->channel_stats[i];
channel_stats = priv->channel_stats[i];
rq_stats = &channel_stats->rq;

s.rx_packets += rq_stats->packets;
@@ -449,7 +449,6 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5i_stats_grps,
.stats_grps_num = mlx5i_stats_grps_num,
.rx_ptp_support = false,
};

/* mlx5i netdev NDos */
@@ -350,7 +350,6 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.rx_ptp_support = false,
};

const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
@@ -484,10 +484,26 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
}

static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
{
struct devlink *devlink = priv_to_devlink(dev);
union devlink_param_value val;
int err;

err = devlink_param_driverinit_value_get(devlink,
DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
&val);
if (!err)
return val.vu32;
mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
return err;
}

static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
struct mlx5_profile *prof = &dev->profile;
void *set_hca_cap;
int max_uc_list;
int err;

err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
@@ -561,6 +577,11 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN(dev, roce_rw_supported))
MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));

max_uc_list = max_uc_list_get_devlink_param(dev);
if (max_uc_list > 0)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list,
ilog2(max_uc_list));

return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -459,6 +459,8 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,

/* add new param generic ids above here*/
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -511,6 +513,12 @@ enum devlink_param_generic_id {
#define DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME "enable_iwarp"
#define DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE DEVLINK_PARAM_TYPE_BOOL

#define DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_NAME "io_eq_size"
#define DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_TYPE DEVLINK_PARAM_TYPE_U32

#define DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME "event_eq_size"
#define DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE DEVLINK_PARAM_TYPE_U32

#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
@@ -4466,6 +4466,16 @@ static const struct devlink_param devlink_param_generic[] = {
.name = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME,
.type = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE,
},
{
.id = DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
.name = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_NAME,
.type = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_TYPE,
},
{
.id = DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
.name = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME,
.type = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE,
},
};

static int devlink_param_generic_verify(const struct devlink_param *param)