mlx5-fixes-2023-07-05
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmSlrvAACgkQSD+KveBX
+j4EcggAsZlHQHRaA9re/5Fr8VV/YNmf+eLM6/2F6CD1sLcwWEsmDXkqZpL/wqVv
bcP/dE/ehiMd1FI6XmEb9aGZY29mQBoItwCnxeUrK75d8CQib/pH87VkQdT9Uf6W
RVqPk2rOL778sTy6V/UZDecfmZB5XfEoOO6f0YP/j2t8HxmfdetBN0orE0iRQBmO
+0W8X5bIfCmGyWdQzOJB4a+S7Wi1JACr6CIOdNtADuZ7C7kKiPWrvEl7DMHlyX3Y
TyrE//piSJuxaHwdMNBHPsXK5ZYyn7ZJjqeCA+Kd+lKMJlJEO7R+TkTOP0RKE++a
ZdDetCqLKegQPpWZgjGzei7PpSpaFA==
=cNgk
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2023-07-05' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2023-07-05

This series provides bug fixes to the mlx5 driver.

* tag 'mlx5-fixes-2023-07-05' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: RX, Fix page_pool page fragment tracking for XDP
  net/mlx5: Query hca_cap_2 only when supported
  net/mlx5e: TC, CT: Offload ct clear only once
  net/mlx5e: Check for NOT_READY flag state after locking
  net/mlx5: Register a unique thermal zone per device
  net/mlx5e: RX, Fix flush and close release flow of regular rq for legacy rq
  net/mlx5e: fix memory leak in mlx5e_ptp_open
  net/mlx5e: fix memory leak in mlx5e_fs_tt_redirect_any_create
  net/mlx5e: fix double free in mlx5e_destroy_flow_table
====================

Link: https://lore.kernel.org/r/20230705175757.284614-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -594,7 +594,7 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)

 	err = fs_any_create_table(fs);
 	if (err)
-		return err;
+		goto err_free_any;

 	err = fs_any_enable(fs);
 	if (err)
@@ -606,8 +606,8 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)

 err_destroy_table:
 	fs_any_destroy_table(fs_any);
-
-	kfree(fs_any);
+err_free_any:
 	mlx5e_fs_set_any(fs, NULL);
+	kfree(fs_any);
 	return err;
 }
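Note: the fix above turns the early return into a goto so that fs_any is freed exactly once on every failure path, and only after mlx5e_fs_set_any(fs, NULL) detaches it. A minimal userspace sketch of the same goto-unwind convention; setup_table()/enable_table() and the error values are invented stand-ins, not driver code.

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins for fs_any_create_table()/fs_any_enable(). */
static int setup_table(void)  { return 0; }
static int enable_table(void) { return -1; /* force the error path */ }

static int create(void)
{
	int *fs_any = malloc(sizeof(*fs_any));
	int err;

	if (!fs_any)
		return -1;

	err = setup_table();
	if (err)
		goto err_free;       /* nothing else to undo yet */

	err = enable_table();
	if (err)
		goto err_destroy;    /* undo the table, then the allocation */

	return 0;

err_destroy:
	/* a destroy_table() step would run here, mirroring fs_any_destroy_table() */
err_free:
	free(fs_any);            /* every failure path frees exactly once */
	return err;
}

int main(void)
{
	printf("create() = %d\n", create());
	return 0;
}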
@@ -729,8 +729,10 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,

 	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
 	cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
-	if (!c || !cparams)
-		return -ENOMEM;
+	if (!c || !cparams) {
+		err = -ENOMEM;
+		goto err_free;
+	}

 	c->priv = priv;
 	c->mdev = priv->mdev;
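Note: before this fix, failure of either kvzalloc returned without freeing the other allocation, which may have succeeded. A small sketch of the corrected shape, assuming only standard C; free(NULL) is a no-op just as kvfree(NULL) is in the kernel, so the shared error path needs no extra NULL checks.

#include <stdlib.h>

static int open_channel(void)
{
	char *c = malloc(64);        /* stand-in for the channel struct */
	char *cparams = malloc(64);  /* stand-in for the params struct */
	int err = 0;

	if (!c || !cparams) {
		err = -1;            /* stands in for -ENOMEM */
		goto err_free;
	}

	/* ... set up the channel using c and cparams ... */

err_free:
	free(cparams);               /* both released, whichever failed */
	free(c);
	return err;
}

int main(void) { return open_channel() ? 1 : 0; }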
@@ -1545,7 +1545,8 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,

 	attr->ct_attr.ct_action |= act->ct.action; /* So we can have clear + ct */
 	attr->ct_attr.zone = act->ct.zone;
-	attr->ct_attr.nf_ft = act->ct.flow_table;
+	if (!(act->ct.action & TCA_CT_ACT_CLEAR))
+		attr->ct_attr.nf_ft = act->ct.flow_table;
 	attr->ct_attr.act_miss_cookie = act->miss_cookie;

 	return 0;
@@ -1990,6 +1991,9 @@ mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr)
 	if (!priv)
 		return -EOPNOTSUPP;

+	if (attr->ct_attr.offloaded)
+		return 0;
+
 	if (attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR) {
 		err = mlx5_tc_ct_entry_set_registers(priv, &attr->parse_attr->mod_hdr_acts,
 						     0, 0, 0, 0);
@@ -1999,11 +2003,15 @@ mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr)
 		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 	}

-	if (!attr->ct_attr.nf_ft) /* means only ct clear action, and not ct_clear,ct() */
+	if (!attr->ct_attr.nf_ft) { /* means only ct clear action, and not ct_clear,ct() */
+		attr->ct_attr.offloaded = true;
 		return 0;
+	}

 	mutex_lock(&priv->control_lock);
 	err = __mlx5_tc_ct_flow_offload(priv, attr);
+	if (!err)
+		attr->ct_attr.offloaded = true;
 	mutex_unlock(&priv->control_lock);

 	return err;
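Note: the new ct_attr.offloaded flag makes the offload call idempotent, so a ct clear action is offloaded only once even if the function is invoked again for the same attr. Roughly the same shape in portable C with pthreads; ct_attr and do_offload() here are illustrative stand-ins, not the driver's types.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ct_attr {
	bool offloaded;              /* mirrors the new mlx5_ct_attr field */
};

static pthread_mutex_t control_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_offload(struct ct_attr *attr)
{
	(void)attr;                  /* placeholder for the real offload */
	return 0;
}

static int flow_offload(struct ct_attr *attr)
{
	int err;

	if (attr->offloaded)         /* second call becomes a no-op */
		return 0;

	pthread_mutex_lock(&control_lock);
	err = do_offload(attr);
	if (!err)
		attr->offloaded = true;  /* mark done only on success */
	pthread_mutex_unlock(&control_lock);

	return err;
}

int main(void)
{
	struct ct_attr attr = { .offloaded = false };

	printf("%d %d\n", flow_offload(&attr), flow_offload(&attr));
	return 0;
}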
@@ -2021,7 +2029,7 @@ void
 mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
 		       struct mlx5_flow_attr *attr)
 {
-	if (!attr->ct_attr.ft) /* no ct action, return */
+	if (!attr->ct_attr.offloaded) /* no ct action, return */
 		return;
 	if (!attr->ct_attr.nf_ft) /* means only ct clear action, and not ct_clear,ct() */
 		return;
@@ -29,6 +29,7 @@ struct mlx5_ct_attr {
 	u32 ct_labels_id;
 	u32 act_miss_mapping;
 	u64 act_miss_cookie;
+	bool offloaded;
 	struct mlx5_ct_ft *ft;
 };

@@ -662,8 +662,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
 				/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
 				 * as we know this is a page_pool page.
 				 */
-				page_pool_put_defragged_page(page->pp,
-							     page, -1, true);
+				page_pool_recycle_direct(page->pp, page);
 			} while (++n < num);

 			break;
@@ -190,6 +190,7 @@ static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in || !ft->g) {
 		kfree(ft->g);
+		ft->g = NULL;
 		kvfree(in);
 		return -ENOMEM;
 	}
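Note: the added ft->g = NULL is what prevents the double free: mlx5e_destroy_flow_table frees ft->g again later, which is harmless only if the pointer was cleared on the error path. A compact userspace sketch of the pattern, with invented names:

#include <stdlib.h>

struct flow_table {
	int *g;                      /* group array, freed on two paths */
};

static int create_groups(struct flow_table *ft)
{
	int *in = malloc(128);

	ft->g = malloc(128);
	if (!in || !ft->g) {
		free(ft->g);
		ft->g = NULL;        /* the fix: no dangling pointer left */
		free(in);
		return -1;           /* stands in for -ENOMEM */
	}
	/* ... fill in the groups ... */
	free(in);
	return 0;
}

static void destroy_table(struct flow_table *ft)
{
	free(ft->g);                 /* safe even after a failed create */
	ft->g = NULL;
}

int main(void)
{
	struct flow_table ft = { 0 };

	create_groups(&ft);          /* may fail; cleanup is self-contained */
	destroy_table(&ft);          /* no double free either way */
	return 0;
}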
@@ -390,10 +390,18 @@ static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
 {
 	struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);

-	if (rq->xsk_pool)
+	if (rq->xsk_pool) {
 		mlx5e_xsk_free_rx_wqe(wi);
-	else
+	} else {
 		mlx5e_free_rx_wqe(rq, wi);
+
+		/* Avoid a second release of the wqe pages: dealloc is called
+		 * for the same missing wqes on regular RQ flush and on regular
+		 * RQ close. This happens when XSK RQs come into play.
+		 */
+		for (int i = 0; i < rq->wqe.info.num_frags; i++, wi++)
+			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
+	}
 }

 static void mlx5e_xsk_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
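Note: dealloc can run twice for the same missing WQEs, once on regular RQ flush and once on RQ close, so the frags are tagged MLX5E_WQE_FRAG_SKIP_RELEASE after the first release. A toy sketch of such an idempotent release, assuming nothing beyond standard C:

#include <stdio.h>

#define SKIP_RELEASE (1u << 0)  /* stands in for MLX5E_WQE_FRAG_SKIP_RELEASE */

struct frag_info {
	unsigned int flags;
};

/* First dealloc releases the frag and marks it; a second dealloc of the
 * same entry (flush followed by close) sees the mark and does nothing.
 */
static void dealloc_frag(struct frag_info *wi)
{
	if (wi->flags & SKIP_RELEASE)
		return;
	/* ... release the page here ... */
	wi->flags |= SKIP_RELEASE;
	printf("released\n");
}

int main(void)
{
	struct frag_info wi = { 0 };

	dealloc_frag(&wi);  /* regular RQ flush */
	dealloc_frag(&wi);  /* regular RQ close: no second release */
	return 0;
}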
@@ -1743,11 +1751,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,

 	prog = rcu_dereference(rq->xdp_prog);
 	if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
-		if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
+		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
 			struct mlx5e_wqe_frag_info *pwi;

 			for (pwi = head_wi; pwi < wi; pwi++)
-				pwi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
+				pwi->frag_page->frags++;
 		}
 		return NULL; /* page/packet was consumed by XDP */
 	}
@@ -1817,12 +1825,8 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 			      rq, wi, cqe, cqe_bcnt);
 	if (!skb) {
 		/* probably for XDP */
-		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-			/* do not return page to cache,
-			 * it will be returned on XDP_TX completion.
-			 */
-			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
-		}
+		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+			wi->frag_page->frags++;
 		goto wq_cyc_pop;
 	}

@@ -1868,12 +1872,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 			      rq, wi, cqe, cqe_bcnt);
 	if (!skb) {
 		/* probably for XDP */
-		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-			/* do not return page to cache,
-			 * it will be returned on XDP_TX completion.
-			 */
-			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
-		}
+		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+			wi->frag_page->frags++;
 		goto wq_cyc_pop;
 	}

@@ -2052,12 +2052,12 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 	if (prog) {
 		if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-				int i;
+				struct mlx5e_frag_page *pfp;

-				for (i = 0; i < sinfo->nr_frags; i++)
-					/* non-atomic */
-					__set_bit(page_idx + i, wi->skip_release_bitmap);
-				return NULL;
+				for (pfp = head_page; pfp < frag_page; pfp++)
+					pfp->frags++;
+
+				wi->linear_page.frags++;
 			}
 			mlx5e_page_release_fragmented(rq, &wi->linear_page);
 			return NULL; /* page/packet was consumed by XDP */
@@ -2155,7 +2155,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 			      cqe_bcnt, &mxbuf);
 	if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
-			__set_bit(page_idx, wi->skip_release_bitmap); /* non-atomic */
+			frag_page->frags++;
 		return NULL; /* page/packet was consumed by XDP */
 	}

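Note: the recurring change across these RX hunks replaces the skip_release bitmap marking with frag_page->frags++, taking an extra page_pool fragment reference for frames queued to XDP_TX; the XDP_TX completion later drops that reference. A toy model of the fragment accounting idea (not the page_pool API itself):

#include <stdio.h>

/* Each user of a page holds one fragment reference; the page can be
 * recycled only when the count drops to zero. frag_page->frags++ in the
 * fix is the extra reference later dropped by the XDP_TX completion.
 */
struct frag_page {
	int frags;
};

static void put_frag(struct frag_page *p)
{
	if (--p->frags == 0)
		printf("page recycled\n");
}

int main(void)
{
	struct frag_page page = { .frags = 1 };  /* RX path's reference */

	page.frags++;        /* frame handed to XDP_TX: extra reference */
	put_frag(&page);     /* RX side done with the page */
	put_frag(&page);     /* XDP_TX completion: last ref, recycle */
	return 0;
}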
@@ -1639,7 +1639,8 @@ static void remove_unready_flow(struct mlx5e_tc_flow *flow)
 	uplink_priv = &rpriv->uplink_priv;

 	mutex_lock(&uplink_priv->unready_flows_lock);
-	unready_flow_del(flow);
+	if (flow_flag_test(flow, NOT_READY))
+		unready_flow_del(flow);
 	mutex_unlock(&uplink_priv->unready_flows_lock);
 }

@@ -1932,8 +1933,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 	esw_attr = attr->esw_attr;
 	mlx5e_put_flow_tunnel_id(flow);

-	if (flow_flag_test(flow, NOT_READY))
-		remove_unready_flow(flow);
+	remove_unready_flow(flow);

 	if (mlx5e_is_offloaded_flow(flow)) {
 		if (flow_flag_test(flow, SLOW))
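Note: the NOT_READY test used to happen before unready_flows_lock was taken, so the flag could change between the check and the list removal; the fix moves the test inside the critical section and drops the now-redundant outer check. A minimal pthread sketch of check-under-lock, with invented types:

#include <pthread.h>
#include <stdbool.h>

struct flow {
	bool not_ready;              /* stands in for the NOT_READY flag */
};

static pthread_mutex_t unready_flows_lock = PTHREAD_MUTEX_INITIALIZER;

static void unready_flow_del(struct flow *f)
{
	f->not_ready = false;        /* placeholder for the list removal */
}

/* The flag is tested only after the lock is held, so no concurrent
 * update can slip in between the check and the removal.
 */
static void remove_unready_flow(struct flow *f)
{
	pthread_mutex_lock(&unready_flows_lock);
	if (f->not_ready)
		unready_flow_del(f);
	pthread_mutex_unlock(&unready_flows_lock);
}

int main(void)
{
	struct flow f = { .not_ready = true };

	remove_unready_flow(&f);     /* deletes the flow */
	remove_unready_flow(&f);     /* safe no-op on a second call */
	return 0;
}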
@@ -807,6 +807,9 @@ static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
 	vport->info.roce_enabled = MLX5_GET(cmd_hca_cap, hca_caps, roce);

+	if (!MLX5_CAP_GEN_MAX(esw->dev, hca_cap_2))
+		goto out_free;
+
 	memset(query_ctx, 0, query_out_sz);
 	err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
 					    MLX5_CAP_GENERAL_2);
@@ -68,14 +68,19 @@ static struct thermal_zone_device_ops mlx5_thermal_ops = {

 int mlx5_thermal_init(struct mlx5_core_dev *mdev)
 {
+	char data[THERMAL_NAME_LENGTH];
 	struct mlx5_thermal *thermal;
-	struct thermal_zone_device *tzd;
-	const char *data = "mlx5";
+	int err;

-	tzd = thermal_zone_get_zone_by_name(data);
-	if (!IS_ERR(tzd))
+	if (!mlx5_core_is_pf(mdev) && !mlx5_core_is_ecpf(mdev))
 		return 0;

+	err = snprintf(data, sizeof(data), "mlx5_%s", dev_name(mdev->device));
+	if (err < 0 || err >= sizeof(data)) {
+		mlx5_core_err(mdev, "Failed to setup thermal zone name, %d\n", err);
+		return -EINVAL;
+	}
+
 	thermal = kzalloc(sizeof(*thermal), GFP_KERNEL);
 	if (!thermal)
 		return -ENOMEM;
@@ -89,10 +94,10 @@ int mlx5_thermal_init(struct mlx5_core_dev *mdev)
 						 &mlx5_thermal_ops,
 						 NULL, 0, MLX5_THERMAL_POLL_INT_MSEC);
 	if (IS_ERR(thermal->tzdev)) {
-		dev_err(mdev->device, "Failed to register thermal zone device (%s) %ld\n",
-			data, PTR_ERR(thermal->tzdev));
+		err = PTR_ERR(thermal->tzdev);
+		mlx5_core_err(mdev, "Failed to register thermal zone device (%s) %d\n", data, err);
 		kfree(thermal);
-		return -EINVAL;
+		return err;
 	}

 	mdev->thermal = thermal;
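Note: the thermal fix derives a per-device zone name with snprintf into a THERMAL_NAME_LENGTH buffer and rejects truncation, instead of bailing out whenever a zone named "mlx5" already existed. A standalone sketch of that bounds check; the sample device name is made up:

#include <stdio.h>
#include <string.h>

#define THERMAL_NAME_LENGTH 20   /* kernel limit for thermal zone names */

/* snprintf() returns the length it *would* have written, so a return
 * value >= the buffer size means the name was truncated and must be
 * rejected rather than registered.
 */
static int build_zone_name(char *buf, size_t len, const char *dev_name)
{
	int n = snprintf(buf, len, "mlx5_%s", dev_name);

	if (n < 0 || (size_t)n >= len)
		return -1;           /* name would not fit: reject it */
	return 0;
}

int main(void)
{
	char name[THERMAL_NAME_LENGTH];

	if (build_zone_name(name, sizeof(name), "0000:08:00.0") == 0)
		printf("zone name: %s\n", name);
	else
		printf("name too long\n");
	return 0;
}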