net: Fix locking in flush_backlog
Need to take spinlocks when dequeuing from input_pkt_queue in flush_backlog. Also, flush_backlog can now be called directly from netdev_run_todo.

Signed-off-by: Tom Herbert <therbert@google.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
ec43b1a64a
commit
e51d739ab7
@@ -2766,17 +2766,19 @@ int netif_receive_skb(struct sk_buff *skb)
|
||||
EXPORT_SYMBOL(netif_receive_skb);
|
||||
|
||||
/* Network device is going away, flush any packets still pending */
|
||||
static void flush_backlog(void *arg)
|
||||
static void flush_backlog(struct net_device *dev, int cpu)
|
||||
{
|
||||
struct net_device *dev = arg;
|
||||
struct softnet_data *queue = &__get_cpu_var(softnet_data);
|
||||
struct softnet_data *queue = &per_cpu(softnet_data, cpu);
|
||||
struct sk_buff *skb, *tmp;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&queue->input_pkt_queue.lock, flags);
|
||||
skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
|
||||
if (skb->dev == dev) {
|
||||
__skb_unlink(skb, &queue->input_pkt_queue);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
spin_unlock_irqrestore(&queue->input_pkt_queue.lock, flags);
|
||||
}
|
||||
|
||||
static int napi_gro_complete(struct sk_buff *skb)
|
||||
@@ -5545,6 +5547,7 @@ void netdev_run_todo(void)
|
||||
while (!list_empty(&list)) {
|
||||
struct net_device *dev
|
||||
= list_first_entry(&list, struct net_device, todo_list);
|
||||
int i;
|
||||
list_del(&dev->todo_list);
|
||||
|
||||
if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
|
||||
@@ -5556,7 +5559,8 @@ void netdev_run_todo(void)
|
||||
|
||||
dev->reg_state = NETREG_UNREGISTERED;
|
||||
|
||||
on_each_cpu(flush_backlog, dev, 1);
|
||||
for_each_online_cpu(i)
|
||||
flush_backlog(dev, i);
|
||||
|
||||
netdev_wait_allrefs(dev);
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user