net/mlx5e: Introduce the mlx5e_flush_rq function

[ Upstream commit d9ba64deb2f1ad58eb3067c7485518f3e96559ee ]

Add a function to flush an RQ: clean up descriptors, release pages and
reset the RQ. This procedure is used by the recovery flow, and it will
also be used in a following commit to free some memory when switching a
channel to the XSK mode.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Stable-dep-of: 1e66220948df ("net/mlx5e: Update rx ring hw mtu upon each rx-fcs flag change")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 3 files changed, 29 insertions(+), 24 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h

@@ -1002,7 +1002,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
 int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);
 
-int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
+int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state);
 void mlx5e_activate_rq(struct mlx5e_rq *rq);
 void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
 void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c

@@ -129,34 +129,13 @@ out:
 	return err;
 }
 
-static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
-{
-	struct net_device *dev = rq->netdev;
-	int err;
-
-	err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
-	if (err) {
-		netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
-		return err;
-	}
-
-	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
-	if (err) {
-		netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
-		return err;
-	}
-
-	return 0;
-}
-
 static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
 {
 	struct mlx5e_rq *rq = ctx;
 	int err;
 
 	mlx5e_deactivate_rq(rq);
-	mlx5e_free_rx_descs(rq);
-	err = mlx5e_rq_to_ready(rq, MLX5_RQC_STATE_ERR);
+	err = mlx5e_flush_rq(rq, MLX5_RQC_STATE_ERR);
 	clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
 	if (err)
 		return err;

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@@ -672,7 +672,7 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
 	return err;
 }
 
-int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
+static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
 {
 	struct mlx5_core_dev *mdev = rq->mdev;
 
@@ -701,6 +701,32 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
 	return err;
 }
 
+static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
+{
+	struct net_device *dev = rq->netdev;
+	int err;
+
+	err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
+	if (err) {
+		netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
+		return err;
+	}
+
+	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+	if (err) {
+		netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
+		return err;
+	}
+
+	return 0;
+}
+
+int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
+{
+	mlx5e_free_rx_descs(rq);
+	return mlx5e_rq_to_ready(rq, curr_state);
+}
+
 static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
 {
 	struct mlx5_core_dev *mdev = rq->mdev;
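
Note (not part of the patch): a minimal sketch of the call sequence the new helper enables, modeled on mlx5e_rx_reporter_err_rq_cqe_recover() in the reporter_rx.c hunk above. The wrapper function name below is hypothetical, and the reactivation step is an assumed follow-up taken from the existing mlx5e_activate_rq() prototype, not something this patch adds.

/* Illustrative only: how a caller is expected to use mlx5e_flush_rq(). */
static int example_rq_recover(struct mlx5e_rq *rq)
{
	int err;

	/* Stop RX processing before touching the RQ's descriptors. */
	mlx5e_deactivate_rq(rq);

	/* Release outstanding descriptors and pages, then drive the RQ
	 * from its current state (here ERR) through RST to RDY.
	 */
	err = mlx5e_flush_rq(rq, MLX5_RQC_STATE_ERR);
	if (err)
		return err;

	/* Resume RX processing (assumed follow-up step). */
	mlx5e_activate_rq(rq);
	return 0;
}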