net: call skb_defer_free_flush() from __napi_busy_loop()
skb_defer_free_flush() is currently called from net_rx_action() and napi_threaded_poll().

We should also call it from __napi_busy_loop(), otherwise there is a risk that the percpu queue can grow until an IPI is forced from skb_attempt_defer_free(), adding a latency spike.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Samiullah Khawaja <skhawaja@google.com>
Acked-by: Stanislav Fomichev <sdf@google.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Link: https://lore.kernel.org/r/20240227210105.3815474-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 1200097fa8
parent 9912362205
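For context, a simplified sketch of the producer side that motivates the change: skb_attempt_defer_free() queues an skb on the allocating CPU's softnet_data and, once the queue reaches a threshold, kicks that CPU with an IPI. This is not the exact upstream code; names such as sysctl_skb_defer_max, defer_ipi_scheduled and defer_csd reflect my reading of net/core/skbuff.c around this kernel version and details may differ.

/* Producer-side sketch (simplified; see net/core/skbuff.c for the real code).
 * An skb freed on a CPU other than the one that allocated it is queued on the
 * allocating CPU's softnet_data::defer_list instead of being freed here.
 */
void skb_attempt_defer_free(struct sk_buff *skb)
{
	int cpu = skb->alloc_cpu;		/* CPU that allocated the skb */
	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
	bool kick;

	spin_lock_bh(&sd->defer_lock);
	/* Ask for a remote kick once the queue reaches half of its limit. */
	kick = sd->defer_count == (READ_ONCE(sysctl_skb_defer_max) >> 1);
	skb->next = sd->defer_list;
	/* Paired with the READ_ONCE() in skb_defer_free_flush() */
	WRITE_ONCE(sd->defer_list, skb);
	sd->defer_count++;
	spin_unlock_bh(&sd->defer_lock);

	/* This IPI is the latency spike the patch avoids: a CPU spinning in
	 * __napi_busy_loop() never ran net_rx_action(), so its defer_list
	 * kept growing until the remote kick became necessary.
	 */
	if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
		smp_call_function_single_async(cpu, &sd->defer_csd);
}

The diff below moves skb_defer_free_flush() above the busy-poll code and calls it from __napi_busy_loop(), so a busy-polling CPU drains its own defer_list before the threshold is ever reached.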
@@ -6173,6 +6173,27 @@ struct napi_struct *napi_by_id(unsigned int napi_id)
 	return NULL;
 }
 
+static void skb_defer_free_flush(struct softnet_data *sd)
+{
+	struct sk_buff *skb, *next;
+
+	/* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
+	if (!READ_ONCE(sd->defer_list))
+		return;
+
+	spin_lock(&sd->defer_lock);
+	skb = sd->defer_list;
+	sd->defer_list = NULL;
+	sd->defer_count = 0;
+	spin_unlock(&sd->defer_lock);
+
+	while (skb != NULL) {
+		next = skb->next;
+		napi_consume_skb(skb, 1);
+		skb = next;
+	}
+}
+
 #if defined(CONFIG_NET_RX_BUSY_POLL)
 
 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
@@ -6297,6 +6318,7 @@ count:
 	if (work > 0)
 		__NET_ADD_STATS(dev_net(napi->dev),
 				LINUX_MIB_BUSYPOLLRXPACKETS, work);
+	skb_defer_free_flush(this_cpu_ptr(&softnet_data));
 	local_bh_enable();
 
 	if (!loop_end || loop_end(loop_end_arg, start_time))
@@ -6726,27 +6748,6 @@ static int napi_thread_wait(struct napi_struct *napi)
 	return -1;
 }
 
-static void skb_defer_free_flush(struct softnet_data *sd)
-{
-	struct sk_buff *skb, *next;
-
-	/* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
-	if (!READ_ONCE(sd->defer_list))
-		return;
-
-	spin_lock(&sd->defer_lock);
-	skb = sd->defer_list;
-	sd->defer_list = NULL;
-	sd->defer_count = 0;
-	spin_unlock(&sd->defer_lock);
-
-	while (skb != NULL) {
-		next = skb->next;
-		napi_consume_skb(skb, 1);
-		skb = next;
-	}
-}
-
 static int napi_threaded_poll(void *data)
 {
 	struct napi_struct *napi = data;
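For readers who want to exercise the busy-poll path touched above: __napi_busy_loop() is reached, among other ways, when a socket opts into busy polling via the standard SO_BUSY_POLL socket option (or the net.core.busy_read/busy_poll sysctls). A minimal userspace sketch, with an arbitrary illustrative 50-microsecond budget:

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int usec = 50;	/* illustrative busy-poll budget in microseconds */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Reads on this socket may now spin in the kernel busy-poll loop
	 * (subject to the usual capability/sysctl requirements) instead of
	 * sleeping, which is the path this patch adds the flush to.
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usec, sizeof(usec)) < 0)
		perror("setsockopt(SO_BUSY_POLL)");
	return 0;
}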