net/mlx5e: XDP, Allow non-linear single-segment frames in XDP TX MPWQE
Under a few restrictions, the TX MPWQE feature can serve multiple TX packets in a single TX descriptor. It requires each of the packets to have a single scatter entry / segment. Today we allow only linear frames to use this feature, although there is no real problem with non-linear ones in which the whole packet resides in the first fragment. Expand the XDP TX MPWQE feature support to include such frames. This is in preparation for the downstream patch, in which we will generate such non-linear frames. Signed-off-by: Tariq Toukan <tariqt@nvidia.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
124d0d8daf
commit
63abf14e13
@ -405,18 +405,35 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
|
||||
{
|
||||
struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
|
||||
struct mlx5e_xdpsq_stats *stats = sq->stats;
|
||||
struct mlx5e_xmit_data *p = xdptxd;
|
||||
struct mlx5e_xmit_data tmp;
|
||||
|
||||
if (xdptxd->has_frags) {
|
||||
/* MPWQE is enabled, but a multi-buffer packet is queued for
|
||||
* transmission. MPWQE can't send fragmented packets, so close
|
||||
* the current session and fall back to a regular WQE.
|
||||
*/
|
||||
if (unlikely(sq->mpwqe.wqe))
|
||||
mlx5e_xdp_mpwqe_complete(sq);
|
||||
return mlx5e_xmit_xdp_frame(sq, xdptxd, 0);
|
||||
struct mlx5e_xmit_data_frags *xdptxdf =
|
||||
container_of(xdptxd, struct mlx5e_xmit_data_frags, xd);
|
||||
|
||||
if (!!xdptxd->len + xdptxdf->sinfo->nr_frags > 1) {
|
||||
/* MPWQE is enabled, but a multi-buffer packet is queued for
|
||||
* transmission. MPWQE can't send fragmented packets, so close
|
||||
* the current session and fall back to a regular WQE.
|
||||
*/
|
||||
if (unlikely(sq->mpwqe.wqe))
|
||||
mlx5e_xdp_mpwqe_complete(sq);
|
||||
return mlx5e_xmit_xdp_frame(sq, xdptxd, 0);
|
||||
}
|
||||
if (!xdptxd->len) {
|
||||
skb_frag_t *frag = &xdptxdf->sinfo->frags[0];
|
||||
|
||||
tmp.data = skb_frag_address(frag);
|
||||
tmp.len = skb_frag_size(frag);
|
||||
tmp.dma_addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[0] :
|
||||
page_pool_get_dma_addr(skb_frag_page(frag)) +
|
||||
skb_frag_off(frag);
|
||||
p = &tmp;
|
||||
}
|
||||
}
|
||||
|
||||
if (unlikely(xdptxd->len > sq->hw_mtu)) {
|
||||
if (unlikely(p->len > sq->hw_mtu)) {
|
||||
stats->err++;
|
||||
return false;
|
||||
}
|
||||
@ -434,7 +451,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
|
||||
mlx5e_xdp_mpwqe_session_start(sq);
|
||||
}
|
||||
|
||||
mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
|
||||
mlx5e_xdp_mpwqe_add_dseg(sq, p, stats);
|
||||
|
||||
if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs)))
|
||||
mlx5e_xdp_mpwqe_complete(sq);
|
||||
|
Loading…
x
Reference in New Issue
Block a user