Merge branch 'bpf-remove-xdp_do_flush_map'

Sebastian Andrzej Siewior says:

====================
bpf: Remove xdp_do_flush_map().

I had #1 split into several patches, one per vendor, and then decided to merge
them into a single patch. I can repost it with one patch per vendor if this is preferred.
====================

Link: https://lore.kernel.org/r/20230908143215.869913-1-bigeasy@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2023-10-03 07:34:53 -07:00
commit e643597346
17 changed files with 16 additions and 22 deletions
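
The driver-side change is mechanical, and the hunks below all follow the same shape: at the end of a receive poll, a driver that saw at least one XDP_REDIRECT verdict flushes the pending redirects once, now via xdp_do_flush() instead of the xdp_do_flush_map() alias. A minimal sketch of that pattern follows; the driver, ring, and flag names (foo_clean_rx, struct foo_ring, foo_run_xdp, FOO_XDP_REDIR) are illustrative stand-ins, not taken from any driver in this series.

/* Illustrative RX poll routine: run the XDP program on each received
 * buffer, record whether any packet was redirected, and flush the
 * pending redirects once at the end of the poll.
 */
static int foo_clean_rx(struct foo_ring *rx_ring, int budget)
{
	unsigned int xdp_res = 0;
	int work_done = 0;

	while (work_done < budget && foo_rx_pending(rx_ring)) {
		xdp_res |= foo_run_xdp(rx_ring);	/* XDP_PASS/TX/REDIRECT */
		work_done++;
	}

	if (xdp_res & FOO_XDP_REDIR)
		xdp_do_flush();		/* was xdp_do_flush_map() */

	return work_done;
}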


@@ -1828,7 +1828,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 	}
 	if (xdp_flags & ENA_XDP_REDIRECT)
-		xdp_do_flush_map();
+		xdp_do_flush();
 	return work_done;


@@ -1655,7 +1655,7 @@ out:
 	rx_ring->stats.bytes += rx_byte_cnt;
 	if (xdp_redirect_frm_cnt)
-		xdp_do_flush_map();
+		xdp_do_flush();
 	if (xdp_tx_frm_cnt)
 		enetc_update_tx_ring_tail(tx_ring);


@@ -1832,7 +1832,7 @@ rx_processing_done:
 	rxq->bd.cur = bdp;
 	if (xdp_result & FEC_ENET_XDP_REDIR)
-		xdp_do_flush_map();
+		xdp_do_flush();
 	return pkt_received;
 }


@@ -2405,7 +2405,7 @@ void i40e_update_rx_stats(struct i40e_ring *rx_ring,
 void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
 {
 	if (xdp_res & I40E_XDP_REDIR)
-		xdp_do_flush_map();
+		xdp_do_flush();
 	if (xdp_res & I40E_XDP_TX) {
 		struct i40e_ring *xdp_ring =


@@ -450,7 +450,7 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
 	struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx];
 	if (xdp_res & ICE_XDP_REDIR)
-		xdp_do_flush_map();
+		xdp_do_flush();
 	if (xdp_res & ICE_XDP_TX) {
 		if (static_branch_unlikely(&ice_xdp_locking_key))


@@ -2421,7 +2421,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	}
 	if (xdp_xmit & IXGBE_XDP_REDIR)
-		xdp_do_flush_map();
+		xdp_do_flush();
 	if (xdp_xmit & IXGBE_XDP_TX) {
 		struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);


@@ -351,7 +351,7 @@ construct_skb:
 	}
 	if (xdp_xmit & IXGBE_XDP_REDIR)
-		xdp_do_flush_map();
+		xdp_do_flush();
 	if (xdp_xmit & IXGBE_XDP_TX) {
 		struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);


@@ -2520,7 +2520,7 @@ next:
 		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
 	if (ps.xdp_redirect)
-		xdp_do_flush_map();
+		xdp_do_flush();
 	if (ps.rx_packets)
 		mvneta_update_stats(pp, &ps);


@@ -4027,7 +4027,7 @@ err_drop_frame:
 	}
 	if (xdp_ret & MVPP2_XDP_REDIR)
-		xdp_do_flush_map();
+		xdp_do_flush();
 	if (ps.rx_packets) {
 		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);


@@ -2211,7 +2211,7 @@ rx_done:
 		net_dim(&eth->rx_dim, dim_sample);
 	if (xdp_flush)
-		xdp_do_flush_map();
+		xdp_do_flush();
 	return done;
 }


@@ -893,7 +893,7 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
 	mlx5e_xmit_xdp_doorbell(xdpsq);
 	if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
-		xdp_do_flush_map();
+		xdp_do_flush();
 		__clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
 	}
 }


@@ -256,7 +256,7 @@ nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
 	nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring);
 	if (xdp_redir)
-		xdp_do_flush_map();
+		xdp_do_flush();
 	if (tx_ring->wr_ptr_add)
 		nfp_net_tx_xmit_more_flush(tx_ring);


@@ -1260,7 +1260,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
 	spent = efx_process_channel(channel, budget);
-	xdp_do_flush_map();
+	xdp_do_flush();
 	if (spent < budget) {
 		if (efx_channel_has_rx_queue(channel) &&


@@ -1285,7 +1285,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
 	spent = efx_process_channel(channel, budget);
-	xdp_do_flush_map();
+	xdp_do_flush();
 	if (spent < budget) {
 		if (efx_channel_has_rx_queue(channel) &&


@@ -780,7 +780,7 @@ static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
 				    u16 pkts)
 {
 	if (xdp_res & NETSEC_XDP_REDIR)
-		xdp_do_flush_map();
+		xdp_do_flush();
 	if (xdp_res & NETSEC_XDP_TX)
 		netsec_xdp_ring_tx_db(priv, pkts);


@@ -1360,7 +1360,7 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
 		 * particular hardware is sharing a common queue, so the
 		 * incoming device might change per packet.
 		 */
-		xdp_do_flush_map();
+		xdp_do_flush();
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(ndev, prog, act);


@@ -1025,12 +1025,6 @@ int xdp_do_redirect_frame(struct net_device *dev,
 			  struct bpf_prog *prog);
 void xdp_do_flush(void);
-/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
- * it is no longer only flushing maps. Keep this define for compatibility
- * until all drivers are updated - do not use xdp_do_flush_map() in new code!
- */
-#define xdp_do_flush_map xdp_do_flush
 void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act);
 #ifdef CONFIG_INET
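
The header hunk above is what made a driver-by-driver conversion possible: while the compatibility define was in place, both spellings compiled to the same helper, so callers could be switched over independently before the alias was finally deleted. A small stand-alone illustration of that aliasing technique (plain user-space C; the stub body is invented for the example and is not the kernel implementation):

#include <stdio.h>

/* Stub standing in for the kernel helper; it only prints here. */
static void xdp_do_flush(void)
{
	puts("flush pending XDP redirects");
}

/* The compatibility alias the header carried until all callers were
 * converted; the hunk above removes it.
 */
#define xdp_do_flush_map xdp_do_flush

int main(void)
{
	xdp_do_flush_map();	/* old spelling, still resolves via the define */
	xdp_do_flush();		/* new spelling, the only one left after this commit */
	return 0;
}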