net/mlx5e: Allow reporting of checksum unnecessary
[ Upstream commit b856df28f9230a47669efbdd57896084caadb2b3 ]

Currently we practically never report checksum unnecessary, because for
all IP packets we take the checksum complete path.

Enable non-default runs with reporting checksum unnecessary, using an
ethtool private flag. This can be useful for performance evals and
other explorations.

Required by the downstream patch which fixes XDP checksum.

Fixes: 86994156c736 ("net/mlx5e: XDP fast RX drop bpf programs support")
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 8da68f79b3
commit 79e972a89c
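The new knob is a standard ethtool private flag, so once the patch is applied it can be inspected with "ethtool --show-priv-flags <dev>" and flipped with "ethtool --set-priv-flags <dev> rx_no_csum_complete on" before an evaluation run. As a rough illustration of what that does under the hood, below is a hedged userspace sketch (not part of the patch) that toggles the flag through the generic SIOCETHTOOL ioctl; the default device name and the minimal error handling are assumptions made for brevity.

/*
 * Illustrative userspace sketch (not part of this patch): enable the new
 * "rx_no_csum_complete" private flag via the SIOCETHTOOL ioctl, which is
 * what `ethtool --set-priv-flags <dev> rx_no_csum_complete on` does.
 * Device name default and minimal error handling are assumptions.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int ethtool_call(int fd, const char *dev, void *cmd)
{
        struct ifreq ifr;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);
        ifr.ifr_data = cmd;
        return ioctl(fd, SIOCETHTOOL, &ifr);
}

int main(int argc, char **argv)
{
        const char *dev = argc > 1 ? argv[1] : "eth0";
        struct ethtool_sset_info *sset;
        struct ethtool_gstrings *names;
        struct ethtool_value flags;
        __u32 i, n, bit = ~0U;
        int fd;

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0) {
                perror("socket");
                return 1;
        }

        /* Ask how many private-flag names the driver exposes. */
        sset = calloc(1, sizeof(*sset) + sizeof(__u32));
        sset->cmd = ETHTOOL_GSSET_INFO;
        sset->sset_mask = 1ULL << ETH_SS_PRIV_FLAGS;
        if (ethtool_call(fd, dev, sset) < 0) {
                perror("ETHTOOL_GSSET_INFO");
                return 1;
        }
        n = sset->data[0];

        /* Fetch the names and locate the bit of "rx_no_csum_complete". */
        names = calloc(1, sizeof(*names) + (size_t)n * ETH_GSTRING_LEN);
        names->cmd = ETHTOOL_GSTRINGS;
        names->string_set = ETH_SS_PRIV_FLAGS;
        names->len = n;
        if (ethtool_call(fd, dev, names) < 0) {
                perror("ETHTOOL_GSTRINGS");
                return 1;
        }
        for (i = 0; i < n; i++)
                if (!strncmp((char *)names->data + i * ETH_GSTRING_LEN,
                             "rx_no_csum_complete", ETH_GSTRING_LEN))
                        bit = i;
        if (bit == ~0U) {
                fprintf(stderr, "%s: rx_no_csum_complete not exposed\n", dev);
                return 1;
        }

        /* Read-modify-write the private-flags bitmap to set the flag. */
        flags.cmd = ETHTOOL_GPFLAGS;
        if (ethtool_call(fd, dev, &flags) < 0) {
                perror("ETHTOOL_GPFLAGS");
                return 1;
        }
        flags.data |= 1U << bit;
        flags.cmd = ETHTOOL_SPFLAGS;
        if (ethtool_call(fd, dev, &flags) < 0) {
                perror("ETHTOOL_SPFLAGS");
                return 1;
        }
        printf("%s: rx_no_csum_complete enabled (bit %u)\n", dev, bit);

        free(names);
        free(sset);
        close(fd);
        return 0;
}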
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -210,6 +210,7 @@ static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
 	"tx_cqe_moder",
 	"rx_cqe_compress",
 	"rx_striding_rq",
+	"rx_no_csum_complete",
 };
 
 enum mlx5e_priv_flag {
@@ -217,6 +218,7 @@ enum mlx5e_priv_flag {
 	MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
 	MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
 	MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
+	MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4),
 };
 
 #define MLX5E_SET_PFLAG(params, pflag, enable) \
@@ -298,6 +300,7 @@ struct mlx5e_dcbx_dp {
 enum {
 	MLX5E_RQ_STATE_ENABLED,
 	MLX5E_RQ_STATE_AM,
+	MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
 };
 
 struct mlx5e_cq {
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1510,6 +1510,27 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
 	return 0;
 }
 
+static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_channels *channels = &priv->channels;
+	struct mlx5e_channel *c;
+	int i;
+
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		return 0;
+
+	for (i = 0; i < channels->num; i++) {
+		c = channels->c[i];
+		if (enable)
+			__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+		else
+			__clear_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+	}
+
+	return 0;
+}
+
 static int mlx5e_handle_pflag(struct net_device *netdev,
 			      u32 wanted_flags,
 			      enum mlx5e_priv_flag flag,
@@ -1561,6 +1582,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
 	err = mlx5e_handle_pflag(netdev, pflags,
 				 MLX5E_PFLAG_RX_STRIDING_RQ,
 				 set_pflag_rx_striding_rq);
+	if (err)
+		goto out;
+
+	err = mlx5e_handle_pflag(netdev, pflags,
+				 MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
+				 set_pflag_rx_no_csum_complete);
 
 out:
 	mutex_unlock(&priv->state_lock);
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -934,6 +934,9 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
 	if (params->rx_dim_enabled)
 		__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
+	if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)
+		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+
 	return 0;
 
 err_destroy_rq:
@@ -4533,6 +4536,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
 		params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
 
 	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
+	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);
 
 	/* RQ */
 	/* Prefer Striding RQ, unless any of the following holds:
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -754,6 +754,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		return;
 	}
 
+	if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
+		goto csum_unnecessary;
+
 	/* CQE csum doesn't cover padding octets in short ethernet
 	 * frames. And the pad field is appended prior to calculating
 	 * and appending the FCS field.