net: Tree wide: Replace xdp_do_flush_map() with xdp_do_flush().
xdp_do_flush_map() is deprecated and new code should use xdp_do_flush()
instead. Replace xdp_do_flush_map() with xdp_do_flush().

Cc: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Cc: Clark Wang <xiaoning.wang@nxp.com>
Cc: Claudiu Manoil <claudiu.manoil@nxp.com>
Cc: David Arinzon <darinzon@amazon.com>
Cc: Edward Cree <ecree.xilinx@gmail.com>
Cc: Felix Fietkau <nbd@nbd.name>
Cc: Grygorii Strashko <grygorii.strashko@ti.com>
Cc: Jassi Brar <jaswinder.singh@linaro.org>
Cc: Jesse Brandeburg <jesse.brandeburg@intel.com>
Cc: John Crispin <john@phrozen.org>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Lorenzo Bianconi <lorenzo@kernel.org>
Cc: Louis Peens <louis.peens@corigine.com>
Cc: Marcin Wojtas <mw@semihalf.com>
Cc: Mark Lee <Mark-MC.Lee@mediatek.com>
Cc: Matthias Brugger <matthias.bgg@gmail.com>
Cc: NXP Linux Team <linux-imx@nxp.com>
Cc: Noam Dagan <ndagan@amazon.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Saeed Bishara <saeedb@amazon.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>
Cc: Sean Wang <sean.wang@mediatek.com>
Cc: Shay Agroskin <shayagr@amazon.com>
Cc: Shenwei Wang <shenwei.wang@nxp.com>
Cc: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
Cc: Tony Nguyen <anthony.l.nguyen@intel.com>
Cc: Vladimir Oltean <vladimir.oltean@nxp.com>
Cc: Wei Fang <wei.fang@nxp.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Arthur Kiyanovski <akiyano@amazon.com>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Acked-by: Martin Habets <habetsm.xilinx@gmail.com>
Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
Link: https://lore.kernel.org/r/20230908143215.869913-2-bigeasy@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 7f04bd109d
parent 788f63c4dc
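Every hunk below is the same mechanical, one-line substitution in a driver's NAPI poll or XDP finalize path: per-frame XDP verdicts are accumulated into a flag, and the redirect queues are flushed once per poll cycle. As a rough sketch of that pattern, assuming hypothetical driver names (mydrv_*, MYDRV_XDP_REDIR); only xdp_do_flush() and xdp_do_redirect() are real kernel APIs here:

/* Sketch of the pattern each hunk touches; "mydrv" and MYDRV_XDP_REDIR
 * are hypothetical, not taken from any of the drivers in this diff.
 */
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <net/xdp.h>

#define MYDRV_XDP_REDIR	0x1	/* set when any frame was XDP_REDIRECTed */

struct mydrv_ring;	/* driver ring state, elided */

static int mydrv_clean_rx_irq(struct mydrv_ring *rx_ring, int budget)
{
	unsigned int xdp_flags = 0;
	int work_done = 0;

	while (work_done < budget) {
		/* ... run the attached XDP program on one frame; on
		 * XDP_REDIRECT, call xdp_do_redirect() and set
		 * MYDRV_XDP_REDIR in xdp_flags ...
		 */
		work_done++;
	}

	/* Flush once per NAPI poll, not once per frame: this is the
	 * call the diff renames from xdp_do_flush_map().
	 */
	if (xdp_flags & MYDRV_XDP_REDIR)
		xdp_do_flush();

	return work_done;
}

The substitution can be made mechanically because xdp_do_flush_map() was kept only as a deprecated alias for xdp_do_flush(); only the call-site name changes.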
@@ -1828,7 +1828,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 	}
 
 	if (xdp_flags & ENA_XDP_REDIRECT)
-		xdp_do_flush_map();
+		xdp_do_flush();
 
 	return work_done;
 
@@ -1655,7 +1655,7 @@ out:
 	rx_ring->stats.bytes += rx_byte_cnt;
 
 	if (xdp_redirect_frm_cnt)
-		xdp_do_flush_map();
+		xdp_do_flush();
 
 	if (xdp_tx_frm_cnt)
 		enetc_update_tx_ring_tail(tx_ring);
@@ -1832,7 +1832,7 @@ rx_processing_done:
 	rxq->bd.cur = bdp;
 
 	if (xdp_result & FEC_ENET_XDP_REDIR)
-		xdp_do_flush_map();
+		xdp_do_flush();
 
 	return pkt_received;
 }
@@ -2405,7 +2405,7 @@ void i40e_update_rx_stats(struct i40e_ring *rx_ring,
 void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
 {
 	if (xdp_res & I40E_XDP_REDIR)
-		xdp_do_flush_map();
+		xdp_do_flush();
 
 	if (xdp_res & I40E_XDP_TX) {
 		struct i40e_ring *xdp_ring =
@@ -450,7 +450,7 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
 	struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx];
 
 	if (xdp_res & ICE_XDP_REDIR)
-		xdp_do_flush_map();
+		xdp_do_flush();
 
 	if (xdp_res & ICE_XDP_TX) {
 		if (static_branch_unlikely(&ice_xdp_locking_key))
@@ -2421,7 +2421,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	}
 
 	if (xdp_xmit & IXGBE_XDP_REDIR)
-		xdp_do_flush_map();
+		xdp_do_flush();
 
 	if (xdp_xmit & IXGBE_XDP_TX) {
 		struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
@@ -351,7 +351,7 @@ construct_skb:
 	}
 
 	if (xdp_xmit & IXGBE_XDP_REDIR)
-		xdp_do_flush_map();
+		xdp_do_flush();
 
 	if (xdp_xmit & IXGBE_XDP_TX) {
 		struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
@@ -2520,7 +2520,7 @@ next:
 		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
 
 	if (ps.xdp_redirect)
-		xdp_do_flush_map();
+		xdp_do_flush();
 
 	if (ps.rx_packets)
 		mvneta_update_stats(pp, &ps);
@@ -4027,7 +4027,7 @@ err_drop_frame:
 	}
 
 	if (xdp_ret & MVPP2_XDP_REDIR)
-		xdp_do_flush_map();
+		xdp_do_flush();
 
 	if (ps.rx_packets) {
 		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
@@ -2211,7 +2211,7 @@ rx_done:
 		net_dim(&eth->rx_dim, dim_sample);
 
 	if (xdp_flush)
-		xdp_do_flush_map();
+		xdp_do_flush();
 
 	return done;
 }
@@ -893,7 +893,7 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
 		mlx5e_xmit_xdp_doorbell(xdpsq);
 
 	if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
-		xdp_do_flush_map();
+		xdp_do_flush();
 		__clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
 	}
 }
@@ -256,7 +256,7 @@ nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
 	nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring);
 
 	if (xdp_redir)
-		xdp_do_flush_map();
+		xdp_do_flush();
 
 	if (tx_ring->wr_ptr_add)
 		nfp_net_tx_xmit_more_flush(tx_ring);
@@ -1260,7 +1260,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
 
 	spent = efx_process_channel(channel, budget);
 
-	xdp_do_flush_map();
+	xdp_do_flush();
 
 	if (spent < budget) {
 		if (efx_channel_has_rx_queue(channel) &&
@@ -1285,7 +1285,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
 
 	spent = efx_process_channel(channel, budget);
 
-	xdp_do_flush_map();
+	xdp_do_flush();
 
 	if (spent < budget) {
 		if (efx_channel_has_rx_queue(channel) &&
@@ -780,7 +780,7 @@ static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
 				   u16 pkts)
 {
 	if (xdp_res & NETSEC_XDP_REDIR)
-		xdp_do_flush_map();
+		xdp_do_flush();
 
 	if (xdp_res & NETSEC_XDP_TX)
 		netsec_xdp_ring_tx_db(priv, pkts);
@@ -1360,7 +1360,7 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
 		 * particular hardware is sharing a common queue, so the
 		 * incoming device might change per packet.
 		 */
-		xdp_do_flush_map();
+		xdp_do_flush();
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(ndev, prog, act);