net: make napi_threaded_poll() aware of sd->defer_list
If we call skb_defer_free_flush() from napi_threaded_poll(), we can avoid raising an IPI from skb_attempt_defer_free() when the list grows too big. This allows napi_threaded_poll() to rely less on softirqs, and lowers the latency caused by an overly long defer list.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit a1aaee7f8f
parent e6f50edfef
@@ -6622,6 +6622,7 @@ static void skb_defer_free_flush(struct softnet_data *sd)
 static int napi_threaded_poll(void *data)
 {
 	struct napi_struct *napi = data;
+	struct softnet_data *sd;
 	void *have;
 
 	while (!napi_thread_wait(napi)) {
@@ -6629,11 +6630,13 @@ static int napi_threaded_poll(void *data)
 			bool repoll = false;
 
 			local_bh_disable();
+			sd = this_cpu_ptr(&softnet_data);
 
 			have = netpoll_poll_lock(napi);
 			__napi_poll(napi, &repoll);
 			netpoll_poll_unlock(have);
 
+			skb_defer_free_flush(sd);
 			local_bh_enable();
 
 			if (!repoll)
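
For context, here is a minimal, simplified sketch of the two sides of the deferred-free mechanism this patch hooks into the NAPI thread. It is not the upstream implementation of skb_attempt_defer_free() / skb_defer_free_flush(): the names ending in _sketch and the defer_max_threshold variable are placeholders, and details such as the exact IPI deduplication guard are omitted.

/*
 * Simplified sketch (not verbatim kernel code) of the deferred skb free
 * mechanism.  Names ending in _sketch and defer_max_threshold are
 * placeholders for illustration only.
 */

static unsigned int defer_max_threshold = 64;	/* placeholder for the real sysctl */

/* Producer side: a CPU that wants to free an skb allocated on another CPU
 * parks it on that CPU's softnet_data::defer_list instead of freeing it
 * remotely.  If the list grows past a threshold, an IPI is raised so the
 * owning CPU runs NET_RX_SOFTIRQ and flushes the list.
 */
static void attempt_defer_free_sketch(struct sk_buff *skb, int cpu)
{
	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
	bool kick;

	spin_lock_bh(&sd->defer_lock);
	skb->next = sd->defer_list;
	WRITE_ONCE(sd->defer_list, skb);
	kick = ++sd->defer_count >= defer_max_threshold;
	spin_unlock_bh(&sd->defer_lock);

	/* This is the IPI the changelog wants to make unnecessary. */
	if (kick)
		smp_call_function_single_async(cpu, &sd->defer_csd);
}

/* Consumer side: drain this CPU's defer_list and free every skb on it.
 * The patch calls the real flush helper from napi_threaded_poll() after
 * each __napi_poll() pass, so a CPU running a threaded NAPI empties its
 * own list without waiting for the softirq triggered by the IPI above.
 */
static void defer_free_flush_sketch(struct softnet_data *sd)
{
	struct sk_buff *skb, *next;

	/* Cheap lockless check: nothing queued, nothing to do. */
	if (!READ_ONCE(sd->defer_list))
		return;

	spin_lock(&sd->defer_lock);
	skb = sd->defer_list;
	sd->defer_list = NULL;
	sd->defer_count = 0;
	spin_unlock(&sd->defer_lock);

	while (skb) {
		next = skb->next;
		napi_consume_skb(skb, 1);
		skb = next;
	}
}

With the flush called directly from the threaded poller, the CPU that owns the list drains it on every poll iteration, so the producer side rarely reaches the threshold that would force the smp_call_function_single_async() IPI and the extra softirq round-trip.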