mlx5-fixes-2022-07-28
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmLi9TwACgkQSD+KveBX
+j7CmQf/Xim+h2fA1pEYz/dThY6amiFnJyQ1ymvPq/QUQV8Sey+XGNZwl/zNXdIv
1Xu3QMkVfZRBUM8MJUQLWi6njKz7URHjicBEab0uqkc+kTrMK0sgG9Zot6TQNFkG
plI/SLPKe2z8D3I8CNMyGsQZ5vLSrHrlPlkHYOpiGNati37Ws1dxmvUXkZUzf7bc
9euPzM+2nOS5Hl8/l8Fjr2XUksPYjFZ7DYp7xcQ3kkyqD9/9KyUUbVd6AinSB7Ql
OUfbYmEU0xqc1Q+vQPyVzBhqzfapH92IlqqsEKpFIj7gJ8GqoQuyZG7OazMIKczD
HCE3+XtnlYh6b6OxN7sW/Fgt6v1QcQ==
=f/Pl
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2022-07-28' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-07-28

This series provides bug fixes to the mlx5 driver.

* tag 'mlx5-fixes-2022-07-28' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: Fix driver use of uninitialized timeout
  net/mlx5: DR, Fix SMFS steering info dump format
  net/mlx5: Adjust log_max_qp to be 18 at most
  net/mlx5e: Modify slow path rules to go to slow fdb
  net/mlx5e: Fix calculations related to max MPWQE size
  net/mlx5e: xsk: Account for XSK RQ UMRs when calculating ICOSQ size
  net/mlx5e: Fix the value of MLX5E_MAX_RQ_NUM_MTTS
  net/mlx5e: TC, Fix post_act to not match on in_port metadata
  net/mlx5e: Remove WARN_ON when trying to offload an unsupported TLS cipher/version
====================

Link: https://lore.kernel.org/r/20220728204640.139990-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 9e98f8c770
@@ -109,7 +109,7 @@ struct page_pool;
 #define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
 #define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
 #define MLX5E_MAX_RQ_NUM_MTTS \
-	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
+	(ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
 #define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
 	(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
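Why the old bound overflowed: the RQ's MTT count is converted to octwords, and the octword count has to fit in a u16. A standalone sketch of the arithmetic, assuming MLX5_MTT_OCTW(npages) expands to ALIGN(npages, 8) / 2 as elsewhere in en.h:

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UP(x, a)   (((x) + (a) - 1) / (a) * (a))
    #define ALIGN_DOWN(x, a) ((x) / (a) * (a))
    #define MTT_OCTW(n)      (ALIGN_UP((n), 8) / 2)  /* two MTTs per octword */

    int main(void)
    {
            uint32_t old_max = (1 << 16) * 2;                 /* 131072 MTTs */
            uint32_t new_max = ALIGN_DOWN(UINT16_MAX, 4) * 2; /* 131064 MTTs */

            printf("old: %u octwords\n", MTT_OCTW(old_max)); /* 65536: does NOT fit in u16 */
            printf("new: %u octwords\n", MTT_OCTW(new_max)); /* 65532: fits in u16 */
            return 0;
    }

The old bound yields 65536 octwords, one past U16_MAX; the new one yields 65532, which fits.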
@@ -174,8 +174,8 @@ struct page_pool;
 	ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
 
 #define MLX5E_MAX_KLM_PER_WQE(mdev) \
-	MLX5E_KLM_ENTRIES_PER_WQE(mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)) \
-				  << MLX5_MKEY_BSF_OCTO_SIZE)
+	MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * \
+				  mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)))
 
 #define MLX5E_MSG_LEVEL NETIF_MSG_LINK
 
@@ -233,7 +233,7 @@ static inline u16 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
 		     MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
 }
 
-static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs)
+static inline u8 mlx5e_get_sw_max_sq_mpw_wqebbs(u8 max_sq_wqebbs)
 {
 	/* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS.
 	 * Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16,
@@ -242,11 +242,12 @@ static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs)
 	 * than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be
 	 * cache-aligned.
 	 */
-#if L1_CACHE_BYTES < 128
-	return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
-#else
-	return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 2);
+	u8 wqebbs = min_t(u8, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
+
+#if L1_CACHE_BYTES >= 128
+	wqebbs = ALIGN_DOWN(wqebbs, 2);
 #endif
+	return wqebbs;
 }
 
 struct mlx5e_tx_wqe {
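The rewrite changes more than the integer width: the old #if picked a fixed cap per cacheline size, so a device cap below the maximum could stay odd and leave a full-session WQE straddling a 128-byte cacheline. The new code clamps first, then rounds down to an even WQEBB count. A small self-contained comparison (the device caps fed in below are made-up inputs):

    #include <stdio.h>

    #define MAX_WQEBBS 16 /* MLX5_SEND_WQE_MAX_WQEBBS */

    static unsigned int old_cap(unsigned int max) /* old 128-byte-cacheline branch */
    {
            return max < MAX_WQEBBS - 2 ? max : MAX_WQEBBS - 2;
    }

    static unsigned int new_cap(unsigned int max)
    {
            unsigned int w = max < MAX_WQEBBS - 1 ? max : MAX_WQEBBS - 1;

            return w & ~1u; /* ALIGN_DOWN(w, 2): full-session WQE stays cache-aligned */
    }

    int main(void)
    {
            unsigned int max;

            /* For max == 13 the old code kept an odd 13; the new one yields 12. */
            for (max = 12; max <= 16; max++)
                    printf("max=%u old=%u new=%u\n", max, old_cap(max), new_cap(max));
            return 0;
    }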
@@ -455,7 +456,7 @@ struct mlx5e_txqsq {
 	struct netdev_queue       *txq;
 	u32                        sqn;
 	u16                        stop_room;
-	u16                        max_sq_mpw_wqebbs;
+	u8                         max_sq_mpw_wqebbs;
 	u8                         min_inline_mode;
 	struct device             *pdev;
 	__be32                     mkey_be;
@@ -570,7 +571,7 @@ struct mlx5e_xdpsq {
 	struct device             *pdev;
 	__be32                     mkey_be;
 	u16                        stop_room;
-	u16                        max_sq_mpw_wqebbs;
+	u8                         max_sq_mpw_wqebbs;
 	u8                         min_inline_mode;
 	unsigned long              state;
 	unsigned int               hw_mtu;
@@ -790,8 +790,20 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
 		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
 
 	wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc));
 
+	/* If XDP program is attached, XSK may be turned on at any time without
+	 * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
+	 * both regular RQ and XSK RQ.
+	 * Although mlx5e_mpwqe_get_log_rq_size accepts mlx5e_xsk_param, it
+	 * doesn't affect its return value, as long as params->xdp_prog != NULL,
+	 * so we can just multiply by 2.
+	 */
+	if (params->xdp_prog)
+		wqebbs *= 2;
+
 	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
 		wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
 
 	return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
 }
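To make the doubling concrete, a worked example with illustrative values (assuming, for the sake of the arithmetic, MLX5E_UMR_WQEBBS == 7 and a log RQ size of 8):

    regular RQ UMRs : wqebbs = 7 * 2^8  = 1792
    XDP attached    : wqebbs = 1792 * 2 = 3584   (room for the XSK RQ's UMRs too)
    ICOSQ log size  : order_base_2(3584) = 12    (2^11 = 2048 < 3584 <= 2^12)

Without the factor of 2, enabling XSK on a live channel could post more UMR WQEs than the ICOSQ holds.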
@@ -128,6 +128,7 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
 	post_attr->inner_match_level = MLX5_MATCH_NONE;
 	post_attr->outer_match_level = MLX5_MATCH_NONE;
 	post_attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_DECAP;
+	post_attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
 
 	handle->ns_type = post_act->ns_type;
 	/* Splits were handled before post action */
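The added flag matters because the post-action rule is looked up after earlier tables may have changed the source-port metadata (an internal port, for instance), so a match on the in_port recorded at insert time could miss; MLX5_ATTR_FLAG_NO_IN_PORT lets the post-act rule match regardless of in_port.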
@@ -54,7 +54,7 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;
 
-	if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info)))
+	if (!mlx5e_ktls_type_check(mdev, crypto_info))
 		return -EOPNOTSUPP;
 
 	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
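Dropping the WARN_ON is deliberate: an unsupported cipher or TLS version is a condition user space can request at any time, not a driver invariant, so it should not splat a backtrace into the log. Returning -EOPNOTSUPP quietly lets the TLS stack fall back to software crypto.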
@@ -230,10 +230,8 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
 }
 
 static void
-esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
-			 struct mlx5_flow_act *flow_act,
-			 struct mlx5_fs_chains *chains,
-			 int i)
+esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+		      struct mlx5_fs_chains *chains, int i)
 {
 	if (mlx5_chains_ignore_flow_level_supported(chains))
 		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
@@ -241,6 +239,16 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
 	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
 }
 
+static void
+esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+			 struct mlx5_eswitch *esw, int i)
+{
+	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
+		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest[i].ft = esw->fdb_table.offloads.slow_fdb;
+}
+
 static int
 esw_setup_chain_dest(struct mlx5_flow_destination *dest,
 		     struct mlx5_flow_act *flow_act,
@@ -475,8 +483,11 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
 	} else if (attr->dest_ft) {
 		esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
 		(*i)++;
-	} else if (mlx5e_tc_attr_flags_skip(attr->flags)) {
-		esw_setup_slow_path_dest(dest, flow_act, chains, *i);
+	} else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
+		esw_setup_slow_path_dest(dest, flow_act, esw, *i);
+		(*i)++;
+	} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
+		esw_setup_accept_dest(dest, flow_act, chains, *i);
 		(*i)++;
 	} else if (attr->dest_chain) {
 		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
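For context, the helper the removed branch relied on folds both flags together, which is why the rewrite needs two explicit branches; the definition below is recalled from en_tc.h, so treat it as illustrative rather than authoritative:

    static inline bool
    mlx5e_tc_attr_flags_skip(u32 attr_flags)
    {
            return attr_flags & (MLX5_ATTR_FLAG_SLOW_PATH | MLX5_ATTR_FLAG_ACCEPT);
    }

With the split, SLOW_PATH rules are steered to esw->fdb_table.offloads.slow_fdb, while ACCEPT rules keep going to the TC end flow table.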
@@ -32,20 +32,17 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type
 	dev->timeouts->to[type] = val;
 }
 
-void mlx5_tout_set_def_val(struct mlx5_core_dev *dev)
+int mlx5_tout_init(struct mlx5_core_dev *dev)
 {
 	int i;
 
-	for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
-		tout_set(dev, tout_def_sw_val[i], i);
-}
-
-int mlx5_tout_init(struct mlx5_core_dev *dev)
-{
 	dev->timeouts = kmalloc(sizeof(*dev->timeouts), GFP_KERNEL);
 	if (!dev->timeouts)
 		return -ENOMEM;
+
+	for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
+		tout_set(dev, tout_def_sw_val[i], i);
+
 	return 0;
 }
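Worth noting: kmalloc() does not zero the allocation, so with the old split any mlx5_tout_ms() lookup issued between mlx5_tout_init() and the later mlx5_tout_set_def_val() call read uninitialized memory. Folding the defaults loop into mlx5_tout_init() closes that window for good.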
@@ -35,7 +35,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev);
 void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
 void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
 int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
-void mlx5_tout_set_def_val(struct mlx5_core_dev *dev);
 u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);
 
 #define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
@@ -524,7 +524,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
 
 	/* Check log_max_qp from HCA caps to set in current profile */
 	if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
-		prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp));
+		prof->log_max_qp = min_t(u8, 18, MLX5_CAP_GEN_MAX(dev, log_max_qp));
 	} else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
 		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
 			       prof->log_max_qp,
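Arithmetic check: the default ceiling moves from 2^17 = 131072 to 2^18 = 262144 QPs, and the min_t() still clamps it to the HCA's advertised log_max_qp when the device supports less.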
@@ -1023,8 +1023,6 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout)
 	if (mlx5_core_is_pf(dev))
 		pcie_print_link_status(dev->pdev);
 
-	mlx5_tout_set_def_val(dev);
-
 	/* wait for firmware to accept initialization segments configurations
 	 */
 	err = wait_fw_init(dev, timeout,
@@ -21,10 +21,11 @@ enum dr_dump_rec_type {
 	DR_DUMP_REC_TYPE_TABLE_TX = 3102,
 
 	DR_DUMP_REC_TYPE_MATCHER = 3200,
-	DR_DUMP_REC_TYPE_MATCHER_MASK = 3201,
+	DR_DUMP_REC_TYPE_MATCHER_MASK_DEPRECATED = 3201,
 	DR_DUMP_REC_TYPE_MATCHER_RX = 3202,
 	DR_DUMP_REC_TYPE_MATCHER_TX = 3203,
 	DR_DUMP_REC_TYPE_MATCHER_BUILDER = 3204,
+	DR_DUMP_REC_TYPE_MATCHER_MASK = 3205,
 
 	DR_DUMP_REC_TYPE_RULE = 3300,
 	DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 = 3301,
@@ -114,13 +115,15 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
 		break;
 	case DR_ACTION_TYP_FT:
 		if (action->dest_tbl->is_fw_tbl)
-			seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+			seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
 				   DR_DUMP_REC_TYPE_ACTION_FT, action_id,
-				   rule_id, action->dest_tbl->fw_tbl.id);
+				   rule_id, action->dest_tbl->fw_tbl.id,
+				   -1);
 		else
-			seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+			seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
 				   DR_DUMP_REC_TYPE_ACTION_FT, action_id,
-				   rule_id, action->dest_tbl->tbl->table_id);
+				   rule_id, action->dest_tbl->tbl->table_id,
+				   DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
 
 		break;
 	case DR_ACTION_TYP_CTR:
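A sketch of the resulting dump lines, with every ID below invented for illustration: a software table destination that used to print as

    3401,0x55,0x1c8,0x12

now appends DR_DBG_PTR_TO_ID() of the table as a fifth column,

    3401,0x55,0x1c8,0x12,0xffff8881058bd000

while FW-managed tables print -1 (rendered by %x as 0xffffffff) in that slot, so the column count stays uniform for the dump parser.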