diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7f6d3b82c29b..557d7daac2d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -669,6 +669,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	struct bpf_prog *xdp_prog;
 	int cq_ring = cq->ring;
 	bool doorbell_pending;
+	bool xdp_redir_flush;
 	struct mlx4_cqe *cqe;
 	struct xdp_buff xdp;
 	int polled = 0;
@@ -682,6 +683,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	xdp_prog = rcu_dereference_bh(ring->xdp_prog);
 	xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
 	doorbell_pending = false;
+	xdp_redir_flush = false;
 
 	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
 	 * descriptor offset can be deduced from the CQE index instead of
@@ -790,6 +792,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 			switch (act) {
 			case XDP_PASS:
 				break;
+			case XDP_REDIRECT:
+				if (xdp_do_redirect(dev, &xdp, xdp_prog) >= 0) {
+					xdp_redir_flush = true;
+					frags[0].page = NULL;
+					goto next;
+				}
+				trace_xdp_exception(dev, xdp_prog, act);
+				goto xdp_drop_no_cnt;
 			case XDP_TX:
 				if (likely(!mlx4_en_xmit_frame(ring, frags, priv, length,
 							       cq_ring,
@@ -897,6 +907,9 @@ next:
 		break;
 	}
 
+	if (xdp_redir_flush)
+		xdp_do_flush();
+
 	if (likely(polled)) {
 		if (doorbell_pending) {
 			priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
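
For illustration only (not part of the patch), a minimal XDP program that would exercise the new XDP_REDIRECT arm above: it redirects every received frame through a single-entry devmap, so the driver takes the xdp_do_redirect() path, sets xdp_redir_flush, and batches the flush via xdp_do_flush() after the CQ poll loop. The map name tx_port, the program and section names, and the one-slot devmap are assumptions made for this sketch.

/* redirect_sketch.bpf.c -- illustrative sketch, not part of this patch.
 * Redirects every received frame to the ifindex stored in tx_port[0].
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} tx_port SEC(".maps");

SEC("xdp")
int xdp_redirect_sketch(struct xdp_md *ctx)
{
	/* On a devmap hit this returns XDP_REDIRECT, which lands in the
	 * new case above: xdp_do_redirect() queues the frame and the
	 * flush is deferred until after the poll loop. On a miss, the
	 * flags value (0 == XDP_ABORTED) is returned instead and the
	 * frame is dropped.
	 */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";

Attached in driver mode with something like `ip link set dev <iface> xdpdrv obj redirect_sketch.bpf.o sec xdp`, with tx_port[0] populated with the egress ifindex, frames received on the mlx4 ring would flow through xdp_do_redirect() above rather than the XDP_TX path.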