net: ena: Refactor napi functions
This patch focuses on changes to the XDP part of the napi polling routine:

1. Update the `napi_comp` stat only when napi is actually complete.
2. Simplify the code by using a function pointer to select the right napi routine (XDP vs. non-XDP path).
3. Remove unnecessary local variables.
4. Adjust a debug print to show the processed XDP frame index rather than the pointer.

Signed-off-by: David Arinzon <darinzon@amazon.com>
Link: https://lore.kernel.org/r/20240101190855.18739-8-darinzon@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
436c793585
commit
b626fd9627
@ -1754,18 +1754,28 @@ static void ena_del_napi_in_range(struct ena_adapter *adapter,
|
||||
static void ena_init_napi_in_range(struct ena_adapter *adapter,
|
||||
int first_index, int count)
|
||||
{
|
||||
int (*napi_handler)(struct napi_struct *napi, int budget);
|
||||
int i;
|
||||
|
||||
for (i = first_index; i < first_index + count; i++) {
|
||||
struct ena_napi *napi = &adapter->ena_napi[i];
|
||||
struct ena_ring *rx_ring, *tx_ring;
|
||||
|
||||
netif_napi_add(adapter->netdev, &napi->napi,
|
||||
ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll);
|
||||
memset(napi, 0, sizeof(*napi));
|
||||
|
||||
rx_ring = &adapter->rx_ring[i];
|
||||
tx_ring = &adapter->tx_ring[i];
|
||||
|
||||
napi_handler = ena_io_poll;
|
||||
if (ENA_IS_XDP_INDEX(adapter, i))
|
||||
napi_handler = ena_xdp_io_poll;
|
||||
|
||||
netif_napi_add(adapter->netdev, &napi->napi, napi_handler);
|
||||
|
||||
if (!ENA_IS_XDP_INDEX(adapter, i))
|
||||
napi->rx_ring = &adapter->rx_ring[i];
|
||||
napi->rx_ring = rx_ring;
|
||||
|
||||
napi->tx_ring = &adapter->tx_ring[i];
|
||||
napi->tx_ring = tx_ring;
|
||||
napi->qid = i;
|
||||
}
|
||||
}
|
||||
|
@ -385,23 +385,22 @@ static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
|
||||
break;
|
||||
|
||||
tx_info = &tx_ring->tx_buffer_info[req_id];
|
||||
xdpf = tx_info->xdpf;
|
||||
|
||||
tx_info->xdpf = NULL;
|
||||
tx_info->last_jiffies = 0;
|
||||
ena_unmap_tx_buff(tx_ring, tx_info);
|
||||
|
||||
netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
|
||||
"tx_poll: q %d skb %p completed\n", tx_ring->qid,
|
||||
xdpf);
|
||||
xdpf = tx_info->xdpf;
|
||||
tx_info->xdpf = NULL;
|
||||
ena_unmap_tx_buff(tx_ring, tx_info);
|
||||
xdp_return_frame(xdpf);
|
||||
|
||||
tx_pkts++;
|
||||
total_done += tx_info->tx_descs;
|
||||
|
||||
xdp_return_frame(xdpf);
|
||||
tx_ring->free_ids[next_to_clean] = req_id;
|
||||
next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
|
||||
tx_ring->ring_size);
|
||||
|
||||
netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
|
||||
"tx_poll: q %d pkt #%d req_id %d\n", tx_ring->qid, tx_pkts, req_id);
|
||||
}
|
||||
|
||||
tx_ring->next_to_clean = next_to_clean;
|
||||
@ -421,22 +420,19 @@ static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
|
||||
int ena_xdp_io_poll(struct napi_struct *napi, int budget)
|
||||
{
|
||||
struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
|
||||
u32 xdp_work_done, xdp_budget;
|
||||
struct ena_ring *tx_ring;
|
||||
int napi_comp_call = 0;
|
||||
u32 work_done;
|
||||
int ret;
|
||||
|
||||
tx_ring = ena_napi->tx_ring;
|
||||
|
||||
xdp_budget = budget;
|
||||
|
||||
if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
|
||||
test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
|
||||
napi_complete_done(napi, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
xdp_work_done = ena_clean_xdp_irq(tx_ring, xdp_budget);
|
||||
work_done = ena_clean_xdp_irq(tx_ring, budget);
|
||||
|
||||
/* If the device is about to reset or down, avoid unmask
|
||||
* the interrupt and return 0 so NAPI won't reschedule
|
||||
@ -444,18 +440,19 @@ int ena_xdp_io_poll(struct napi_struct *napi, int budget)
|
||||
if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags))) {
|
||||
napi_complete_done(napi, 0);
|
||||
ret = 0;
|
||||
} else if (xdp_budget > xdp_work_done) {
|
||||
napi_comp_call = 1;
|
||||
if (napi_complete_done(napi, xdp_work_done))
|
||||
} else if (budget > work_done) {
|
||||
ena_increase_stat(&tx_ring->tx_stats.napi_comp, 1,
|
||||
&tx_ring->syncp);
|
||||
if (napi_complete_done(napi, work_done))
|
||||
ena_unmask_interrupt(tx_ring, NULL);
|
||||
|
||||
ena_update_ring_numa_node(tx_ring, NULL);
|
||||
ret = xdp_work_done;
|
||||
ret = work_done;
|
||||
} else {
|
||||
ret = xdp_budget;
|
||||
ret = budget;
|
||||
}
|
||||
|
||||
u64_stats_update_begin(&tx_ring->syncp);
|
||||
tx_ring->tx_stats.napi_comp += napi_comp_call;
|
||||
tx_ring->tx_stats.tx_poll++;
|
||||
u64_stats_update_end(&tx_ring->syncp);
|
||||
tx_ring->tx_stats.last_napi_jiffies = jiffies;
|
||||
|
Loading…
x
Reference in New Issue
Block a user