Merge branch 'net-add-preliminary-netdev-refcount-tracking'

Eric Dumazet says:

====================
net: add preliminary netdev refcount tracking

The first two patches add a generic infrastructure that will be used
to track refcount increments/decrements.

The general idea is to be able to precisely pair each decrement with
a corresponding prior increment. Both share a cookie, basically
a pointer to private data storing stack traces.
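
Concretely, with the generic API added in the first patch, the pairing looks
roughly like this (a sketch; "obj" and its refcnt_tracker directory are
placeholders for whatever object owns the references):

	struct ref_tracker *cookie;	/* the shared cookie */

	/* increment side: allocates the cookie and saves the current stack trace */
	ref_tracker_alloc(&obj->refcnt_tracker, &cookie, GFP_KERNEL);
	...
	/* decrement side: must hand back the very same cookie */
	ref_tracker_free(&obj->refcnt_tracker, &cookie);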

The third patch adds the dev_hold_track() and dev_put_track() helpers
(CONFIG_NET_DEV_REFCNT_TRACKER).

Then a series of 20 patches converts some dev_hold()/dev_put()
pairs to the new helpers: dev_hold_track() and dev_put_track().
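
A typical conversion adds a netdevice_tracker field next to the pointer it
protects and switches the hold/put pair, roughly (a sketch; struct foo is a
placeholder for the converted call site):

	struct foo {
		struct net_device *dev;
		netdevice_tracker dev_tracker;
	};

	/* was: dev_hold(foo->dev); */
	dev_hold_track(foo->dev, &foo->dev_tracker, GFP_KERNEL);
	...
	/* was: dev_put(foo->dev); */
	dev_put_track(foo->dev, &foo->dev_tracker);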

Hopefully this will be used by developers and syzbot to
root-cause bugs that cause netdevice dismantle freezes.

With the CONFIG_PCPU_DEV_REFCNT=n option, we were able to detect
some classes of bugs, but too late (only after too many dev_put()
calls had already happened).

Another series will be sent after this one is merged.

v3: moved NET_DEV_REFCNT_TRACKER to net/Kconfig.debug
    added "depends on DEBUG_KERNEL && STACKTRACE_SUPPORT"
    to hopefully get rid of kbuild reports for ARCH=nios2
    Reworded patch 3 changelog.
    Added missing htmldocs (Jakub)

v2: added four additional patches,
    added netdev_tracker_alloc() and netdev_tracker_free()
    addressed build error (kernel bots),
    use GFP_ATOMIC in test_ref_tracker_timer_func()
====================

Link: https://lore.kernel.org/r/20211205042217.982127-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2021-12-06 16:06:03 -08:00
commit 4c375272fb
42 changed files with 520 additions and 62 deletions


@ -721,7 +721,7 @@ restart:
__netpoll_cleanup(&nt->np);
spin_lock_irqsave(&target_list_lock, flags);
dev_put(nt->np.dev);
dev_put_track(nt->np.dev, &nt->np.dev_tracker);
nt->np.dev = NULL;
nt->enabled = false;
stopped = true;


@ -24,6 +24,8 @@ struct ipv4_devconf {
struct in_device {
struct net_device *dev;
netdevice_tracker dev_tracker;
refcount_t refcnt;
int dead;
struct in_ifaddr __rcu *ifa_list;/* IP ifaddr chain */


@ -12,6 +12,7 @@
/**
* struct vif_device - interface representor for multicast routing
* @dev: network device being used
* @dev_tracker: refcount tracker for @dev reference
* @bytes_in: statistic; bytes ingressing
* @bytes_out: statistic; bytes egressing
* @pkt_in: statistic; packets ingressing
@ -26,6 +27,7 @@
*/
struct vif_device {
struct net_device *dev;
netdevice_tracker dev_tracker;
unsigned long bytes_in, bytes_out;
unsigned long pkt_in, pkt_out;
unsigned long rate_limit;


@ -48,6 +48,7 @@
#include <uapi/linux/pkt_cls.h>
#include <linux/hashtable.h>
#include <linux/rbtree.h>
#include <linux/ref_tracker.h>
struct netpoll_info;
struct device;
@ -300,6 +301,12 @@ enum netdev_state_t {
};
#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
typedef struct ref_tracker *netdevice_tracker;
#else
typedef struct {} netdevice_tracker;
#endif
struct gro_list {
struct list_head list;
int count;
@ -579,6 +586,8 @@ struct netdev_queue {
* read-mostly part
*/
struct net_device *dev;
netdevice_tracker dev_tracker;
struct Qdisc __rcu *qdisc;
struct Qdisc *qdisc_sleeping;
#ifdef CONFIG_SYSFS
@ -734,6 +743,8 @@ struct netdev_rx_queue {
#endif
struct kobject kobj;
struct net_device *dev;
netdevice_tracker dev_tracker;
#ifdef CONFIG_XDP_SOCKETS
struct xsk_buff_pool *pool;
#endif
@ -1865,6 +1876,7 @@ enum netdev_ml_priv_type {
* @proto_down_reason: reason a netdev interface is held down
* @pcpu_refcnt: Number of references to this device
* @dev_refcnt: Number of references to this device
* @refcnt_tracker: Tracker directory for tracked references to this device
* @todo_list: Delayed register/unregister
* @link_watch_list: XXX: need comments on this one
*
@ -1938,6 +1950,7 @@ enum netdev_ml_priv_type {
* keep a list of interfaces to be deleted.
*
* @dev_addr_shadow: Copy of @dev_addr to catch direct writes.
* @linkwatch_dev_tracker: refcount tracker used by linkwatch.
*
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
@ -2178,6 +2191,7 @@ struct net_device {
#else
refcount_t dev_refcnt;
#endif
struct ref_tracker_dir refcnt_tracker;
struct list_head link_watch_list;
@ -2267,6 +2281,7 @@ struct net_device {
struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
u8 dev_addr_shadow[MAX_ADDR_LEN];
netdevice_tracker linkwatch_dev_tracker;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@ -3805,6 +3820,7 @@ void netdev_run_todo(void);
* @dev: network device
*
* Release reference to device to allow it to be freed.
* Try using dev_put_track() instead.
*/
static inline void dev_put(struct net_device *dev)
{
@ -3822,6 +3838,7 @@ static inline void dev_put(struct net_device *dev)
* @dev: network device
*
* Hold reference to device to keep it from being freed.
* Try using dev_hold_track() instead.
*/
static inline void dev_hold(struct net_device *dev)
{
@ -3834,6 +3851,57 @@ static inline void dev_hold(struct net_device *dev)
}
}
static inline void netdev_tracker_alloc(struct net_device *dev,
netdevice_tracker *tracker, gfp_t gfp)
{
#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp);
#endif
}
static inline void netdev_tracker_free(struct net_device *dev,
netdevice_tracker *tracker)
{
#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
ref_tracker_free(&dev->refcnt_tracker, tracker);
#endif
}
static inline void dev_hold_track(struct net_device *dev,
netdevice_tracker *tracker, gfp_t gfp)
{
if (dev) {
dev_hold(dev);
netdev_tracker_alloc(dev, tracker, gfp);
}
}
static inline void dev_put_track(struct net_device *dev,
netdevice_tracker *tracker)
{
if (dev) {
netdev_tracker_free(dev, tracker);
dev_put(dev);
}
}
static inline void dev_replace_track(struct net_device *odev,
struct net_device *ndev,
netdevice_tracker *tracker,
gfp_t gfp)
{
#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
if (odev)
ref_tracker_free(&odev->refcnt_tracker, tracker);
#endif
dev_hold(ndev);
dev_put(odev);
#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
if (ndev)
ref_tracker_alloc(&ndev->refcnt_tracker, tracker, gfp);
#endif
}
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
* and _off may be called from IRQ context, but it is caller
* who is responsible for serialization of these calls.
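
The dev_replace_track() helper above covers call sites that redirect an
existing tracked reference to another device, e.g. (a sketch mirroring the
dst/route conversions later in this series):

	/* move the tracked reference from the dying device to blackhole_netdev */
	rt->dst.dev = blackhole_netdev;
	dev_replace_track(dev, blackhole_netdev, &rt->dst.dev_tracker, GFP_ATOMIC);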


@ -24,6 +24,7 @@ union inet_addr {
struct netpoll {
struct net_device *dev;
netdevice_tracker dev_tracker;
char dev_name[IFNAMSIZ];
const char *name;


@ -0,0 +1,73 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#ifndef _LINUX_REF_TRACKER_H
#define _LINUX_REF_TRACKER_H
#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/spinlock.h>
struct ref_tracker;
struct ref_tracker_dir {
#ifdef CONFIG_REF_TRACKER
spinlock_t lock;
unsigned int quarantine_avail;
refcount_t untracked;
struct list_head list; /* List of active trackers */
struct list_head quarantine; /* List of dead trackers */
#endif
};
#ifdef CONFIG_REF_TRACKER
static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
unsigned int quarantine_count)
{
INIT_LIST_HEAD(&dir->list);
INIT_LIST_HEAD(&dir->quarantine);
spin_lock_init(&dir->lock);
dir->quarantine_avail = quarantine_count;
refcount_set(&dir->untracked, 1);
}
void ref_tracker_dir_exit(struct ref_tracker_dir *dir);
void ref_tracker_dir_print(struct ref_tracker_dir *dir,
unsigned int display_limit);
int ref_tracker_alloc(struct ref_tracker_dir *dir,
struct ref_tracker **trackerp, gfp_t gfp);
int ref_tracker_free(struct ref_tracker_dir *dir,
struct ref_tracker **trackerp);
#else /* CONFIG_REF_TRACKER */
static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
unsigned int quarantine_count)
{
}
static inline void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
{
}
static inline void ref_tracker_dir_print(struct ref_tracker_dir *dir,
unsigned int display_limit)
{
}
static inline int ref_tracker_alloc(struct ref_tracker_dir *dir,
struct ref_tracker **trackerp,
gfp_t gfp)
{
return 0;
}
static inline int ref_tracker_free(struct ref_tracker_dir *dir,
struct ref_tracker **trackerp)
{
return 0;
}
#endif
#endif /* _LINUX_REF_TRACKER_H */
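
A minimal (hypothetical) consumer of this header would embed the directory in
the refcounted object, pass a struct ref_tracker * cookie with every
reference, and let ref_tracker_dir_exit() report anything still outstanding:

	#include <linux/ref_tracker.h>
	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct my_obj {
		refcount_t refcnt;
		struct ref_tracker_dir refcnt_tracker;
	};

	static void my_obj_init(struct my_obj *obj)
	{
		refcount_set(&obj->refcnt, 1);
		/* keep up to 16 released trackers around to catch double frees */
		ref_tracker_dir_init(&obj->refcnt_tracker, 16);
	}

	static void my_obj_hold(struct my_obj *obj, struct ref_tracker **trk)
	{
		refcount_inc(&obj->refcnt);
		ref_tracker_alloc(&obj->refcnt_tracker, trk, GFP_KERNEL);
	}

	static void my_obj_put(struct my_obj *obj, struct ref_tracker **trk)
	{
		ref_tracker_free(&obj->refcnt_tracker, trk);
		if (refcount_dec_and_test(&obj->refcnt)) {
			ref_tracker_dir_exit(&obj->refcnt_tracker);	/* warns on leaked references */
			kfree(obj);
		}
	}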


@ -664,13 +664,17 @@ struct devlink_health_reporter_ops {
* @trap_name: Trap name.
* @trap_group_name: Trap group name.
* @input_dev: Input netdevice.
* @dev_tracker: refcount tracker for @input_dev.
* @fa_cookie: Flow action user cookie.
* @trap_type: Trap type.
*/
struct devlink_trap_metadata {
const char *trap_name;
const char *trap_group_name;
struct net_device *input_dev;
netdevice_tracker dev_tracker;
const struct flow_action_cookie *fa_cookie;
enum devlink_trap_type trap_type;
};


@ -77,6 +77,7 @@ struct dst_entry {
#ifndef CONFIG_64BIT
atomic_t __refcnt; /* 32-bit offset 64 */
#endif
netdevice_tracker dev_tracker;
};
struct dst_metrics {


@ -25,6 +25,7 @@ struct failover_ops {
struct failover {
struct list_head list;
struct net_device __rcu *failover_dev;
netdevice_tracker dev_tracker;
struct failover_ops __rcu *ops;
};


@ -160,6 +160,7 @@ struct ipv6_devstat {
struct inet6_dev {
struct net_device *dev;
netdevice_tracker dev_tracker;
struct list_head addr_list;


@ -46,6 +46,7 @@ struct __ip6_tnl_parm {
struct ip6_tnl {
struct ip6_tnl __rcu *next; /* next tunnel in list */
struct net_device *dev; /* virtual device associated with tunnel */
netdevice_tracker dev_tracker;
struct net *net; /* netns for packet i/o */
struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
struct flowi fl; /* flowi template for xmit */


@ -104,7 +104,10 @@ struct metadata_dst;
struct ip_tunnel {
struct ip_tunnel __rcu *next;
struct hlist_node hash_node;
struct net_device *dev;
netdevice_tracker dev_tracker;
struct net *net; /* netns for packet i/o */
unsigned long err_time; /* Time when the last ICMP error


@ -70,6 +70,7 @@ enum {
struct neigh_parms {
possible_net_t net;
struct net_device *dev;
netdevice_tracker dev_tracker;
struct list_head list;
int (*neigh_setup)(struct neighbour *);
struct neigh_table *tbl;
@ -158,6 +159,7 @@ struct neighbour {
struct list_head managed_list;
struct rcu_head rcu;
struct net_device *dev;
netdevice_tracker dev_tracker;
u8 primary_key[0];
} __randomize_layout;
@ -173,6 +175,7 @@ struct pneigh_entry {
struct pneigh_entry *next;
possible_net_t net;
struct net_device *dev;
netdevice_tracker dev_tracker;
u32 flags;
u8 protocol;
u8 key[];


@ -125,7 +125,7 @@ struct Qdisc {
spinlock_t seqlock;
struct rcu_head rcu;
netdevice_tracker dev_tracker;
/* private data */
long privdata[] ____cacheline_aligned;
};


@ -680,6 +680,11 @@ config STACK_HASH_ORDER
Select the hash size as a power of 2 for the stackdepot hash table.
Choose a lower value to reduce the memory impact.
config REF_TRACKER
bool
depends on STACKTRACE_SUPPORT
select STACKDEPOT
config SBITMAP
bool


@ -598,6 +598,11 @@ config DEBUG_MISC
Say Y here if you need to enable miscellaneous debug code that should
be under a more specific debug option but isn't.
menu "Networking Debugging"
source "net/Kconfig.debug"
endmenu # "Networking Debugging"
menu "Memory Debugging"
@ -2106,6 +2111,16 @@ config BACKTRACE_SELF_TEST
Say N if you are unsure.
config TEST_REF_TRACKER
tristate "Self test for reference tracker"
depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
select REF_TRACKER
help
This option provides a kernel module performing tests
using reference tracker infrastructure.
Say N if you are unsure.
config RBTREE_TEST
tristate "Red-Black tree test"
depends on DEBUG_KERNEL


@ -101,7 +101,7 @@ obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
obj-$(CONFIG_TEST_HMM) += test_hmm.o
obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o
#
# CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns
# off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS
@ -270,6 +270,8 @@ obj-$(CONFIG_STACKDEPOT) += stackdepot.o
KASAN_SANITIZE_stackdepot.o := n
KCOV_INSTRUMENT_stackdepot.o := n
obj-$(CONFIG_REF_TRACKER) += ref_tracker.o
libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
fdt_empty_tree.o fdt_addresses.o
$(foreach file, $(libfdt_files), \

lib/ref_tracker.c (new file)

@ -0,0 +1,140 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/export.h>
#include <linux/ref_tracker.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#define REF_TRACKER_STACK_ENTRIES 16
struct ref_tracker {
struct list_head head; /* anchor into dir->list or dir->quarantine */
bool dead;
depot_stack_handle_t alloc_stack_handle;
depot_stack_handle_t free_stack_handle;
};
void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
{
struct ref_tracker *tracker, *n;
unsigned long flags;
bool leak = false;
spin_lock_irqsave(&dir->lock, flags);
list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
list_del(&tracker->head);
kfree(tracker);
dir->quarantine_avail++;
}
list_for_each_entry_safe(tracker, n, &dir->list, head) {
pr_err("leaked reference.\n");
if (tracker->alloc_stack_handle)
stack_depot_print(tracker->alloc_stack_handle);
leak = true;
list_del(&tracker->head);
kfree(tracker);
}
spin_unlock_irqrestore(&dir->lock, flags);
WARN_ON_ONCE(leak);
WARN_ON_ONCE(refcount_read(&dir->untracked) != 1);
}
EXPORT_SYMBOL(ref_tracker_dir_exit);
void ref_tracker_dir_print(struct ref_tracker_dir *dir,
unsigned int display_limit)
{
struct ref_tracker *tracker;
unsigned long flags;
unsigned int i = 0;
spin_lock_irqsave(&dir->lock, flags);
list_for_each_entry(tracker, &dir->list, head) {
if (i < display_limit) {
pr_err("leaked reference.\n");
if (tracker->alloc_stack_handle)
stack_depot_print(tracker->alloc_stack_handle);
i++;
} else {
break;
}
}
spin_unlock_irqrestore(&dir->lock, flags);
}
EXPORT_SYMBOL(ref_tracker_dir_print);
int ref_tracker_alloc(struct ref_tracker_dir *dir,
struct ref_tracker **trackerp,
gfp_t gfp)
{
unsigned long entries[REF_TRACKER_STACK_ENTRIES];
struct ref_tracker *tracker;
unsigned int nr_entries;
unsigned long flags;
*trackerp = tracker = kzalloc(sizeof(*tracker), gfp | __GFP_NOFAIL);
if (unlikely(!tracker)) {
pr_err_once("memory allocation failure, unreliable refcount tracker.\n");
refcount_inc(&dir->untracked);
return -ENOMEM;
}
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
nr_entries = filter_irq_stacks(entries, nr_entries);
tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp);
spin_lock_irqsave(&dir->lock, flags);
list_add(&tracker->head, &dir->list);
spin_unlock_irqrestore(&dir->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_alloc);
int ref_tracker_free(struct ref_tracker_dir *dir,
struct ref_tracker **trackerp)
{
unsigned long entries[REF_TRACKER_STACK_ENTRIES];
struct ref_tracker *tracker = *trackerp;
depot_stack_handle_t stack_handle;
unsigned int nr_entries;
unsigned long flags;
if (!tracker) {
refcount_dec(&dir->untracked);
return -EEXIST;
}
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
nr_entries = filter_irq_stacks(entries, nr_entries);
stack_handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC);
spin_lock_irqsave(&dir->lock, flags);
if (tracker->dead) {
pr_err("reference already released.\n");
if (tracker->alloc_stack_handle) {
pr_err("allocated in:\n");
stack_depot_print(tracker->alloc_stack_handle);
}
if (tracker->free_stack_handle) {
pr_err("freed in:\n");
stack_depot_print(tracker->free_stack_handle);
}
spin_unlock_irqrestore(&dir->lock, flags);
WARN_ON_ONCE(1);
return -EINVAL;
}
tracker->dead = true;
tracker->free_stack_handle = stack_handle;
list_move_tail(&tracker->head, &dir->quarantine);
if (!dir->quarantine_avail) {
tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head);
list_del(&tracker->head);
} else {
dir->quarantine_avail--;
tracker = NULL;
}
spin_unlock_irqrestore(&dir->lock, flags);
kfree(tracker);
return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_free);

lib/test_ref_tracker.c (new file)

@ -0,0 +1,115 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Reference tracker self test.
*
* Copyright (c) 2021 Eric Dumazet <edumazet@google.com>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/ref_tracker.h>
#include <linux/slab.h>
#include <linux/timer.h>
static struct ref_tracker_dir ref_dir;
static struct ref_tracker *tracker[20];
#define TRT_ALLOC(X) static noinline void \
alloctest_ref_tracker_alloc##X(struct ref_tracker_dir *dir, \
struct ref_tracker **trackerp) \
{ \
ref_tracker_alloc(dir, trackerp, GFP_KERNEL); \
}
TRT_ALLOC(1)
TRT_ALLOC(2)
TRT_ALLOC(3)
TRT_ALLOC(4)
TRT_ALLOC(5)
TRT_ALLOC(6)
TRT_ALLOC(7)
TRT_ALLOC(8)
TRT_ALLOC(9)
TRT_ALLOC(10)
TRT_ALLOC(11)
TRT_ALLOC(12)
TRT_ALLOC(13)
TRT_ALLOC(14)
TRT_ALLOC(15)
TRT_ALLOC(16)
TRT_ALLOC(17)
TRT_ALLOC(18)
TRT_ALLOC(19)
#undef TRT_ALLOC
static noinline void
alloctest_ref_tracker_free(struct ref_tracker_dir *dir,
struct ref_tracker **trackerp)
{
ref_tracker_free(dir, trackerp);
}
static struct timer_list test_ref_tracker_timer;
static atomic_t test_ref_timer_done = ATOMIC_INIT(0);
static void test_ref_tracker_timer_func(struct timer_list *t)
{
ref_tracker_alloc(&ref_dir, &tracker[0], GFP_ATOMIC);
atomic_set(&test_ref_timer_done, 1);
}
static int __init test_ref_tracker_init(void)
{
int i;
ref_tracker_dir_init(&ref_dir, 100);
timer_setup(&test_ref_tracker_timer, test_ref_tracker_timer_func, 0);
mod_timer(&test_ref_tracker_timer, jiffies + 1);
alloctest_ref_tracker_alloc1(&ref_dir, &tracker[1]);
alloctest_ref_tracker_alloc2(&ref_dir, &tracker[2]);
alloctest_ref_tracker_alloc3(&ref_dir, &tracker[3]);
alloctest_ref_tracker_alloc4(&ref_dir, &tracker[4]);
alloctest_ref_tracker_alloc5(&ref_dir, &tracker[5]);
alloctest_ref_tracker_alloc6(&ref_dir, &tracker[6]);
alloctest_ref_tracker_alloc7(&ref_dir, &tracker[7]);
alloctest_ref_tracker_alloc8(&ref_dir, &tracker[8]);
alloctest_ref_tracker_alloc9(&ref_dir, &tracker[9]);
alloctest_ref_tracker_alloc10(&ref_dir, &tracker[10]);
alloctest_ref_tracker_alloc11(&ref_dir, &tracker[11]);
alloctest_ref_tracker_alloc12(&ref_dir, &tracker[12]);
alloctest_ref_tracker_alloc13(&ref_dir, &tracker[13]);
alloctest_ref_tracker_alloc14(&ref_dir, &tracker[14]);
alloctest_ref_tracker_alloc15(&ref_dir, &tracker[15]);
alloctest_ref_tracker_alloc16(&ref_dir, &tracker[16]);
alloctest_ref_tracker_alloc17(&ref_dir, &tracker[17]);
alloctest_ref_tracker_alloc18(&ref_dir, &tracker[18]);
alloctest_ref_tracker_alloc19(&ref_dir, &tracker[19]);
/* free all trackers but first 0 and 1. */
for (i = 2; i < ARRAY_SIZE(tracker); i++)
alloctest_ref_tracker_free(&ref_dir, &tracker[i]);
/* Attempt to free an already freed tracker. */
alloctest_ref_tracker_free(&ref_dir, &tracker[2]);
while (!atomic_read(&test_ref_timer_done))
msleep(1);
/* This should warn about tracker[0] & tracker[1] being not freed. */
ref_tracker_dir_exit(&ref_dir);
return 0;
}
static void __exit test_ref_tracker_exit(void)
{
}
module_init(test_ref_tracker_init);
module_exit(test_ref_tracker_exit);
MODULE_LICENSE("GPL v2");

net/Kconfig.debug (new file)

@ -0,0 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_DEV_REFCNT_TRACKER
bool "Enable net device refcount tracking"
depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
select REF_TRACKER
default n
help
Enable debugging feature to track device references.
This adds memory and cpu costs.
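
To exercise the tracker on a debug build, the relevant .config fragment would
look like this (REF_TRACKER and STACKDEPOT are pulled in via select):

	CONFIG_DEBUG_KERNEL=y
	CONFIG_NET_DEV_REFCNT_TRACKER=y
	# selected automatically:
	# CONFIG_REF_TRACKER=y
	# CONFIG_STACKDEPOT=y
	# optional self test module:
	CONFIG_TEST_REF_TRACKER=m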


@ -6537,6 +6537,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
struct netdev_adjacent {
struct net_device *dev;
netdevice_tracker dev_tracker;
/* upper master flag, there can only be one master device per list */
bool master;
@ -7301,7 +7302,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
adj->ref_nr = 1;
adj->private = private;
adj->ignore = false;
dev_hold(adj_dev);
dev_hold_track(adj_dev, &adj->dev_tracker, GFP_KERNEL);
pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
@ -7330,8 +7331,8 @@ remove_symlinks:
if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
dev_put_track(adj_dev, &adj->dev_tracker);
kfree(adj);
dev_put(adj_dev);
return ret;
}
@ -7372,7 +7373,7 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
list_del_rcu(&adj->list);
pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
adj_dev->name, dev->name, adj_dev->name);
dev_put(adj_dev);
dev_put_track(adj_dev, &adj->dev_tracker);
kfree_rcu(adj, rcu);
}
@ -9864,6 +9865,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
netdev_unregister_timeout_secs * HZ)) {
pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
dev->name, refcnt);
ref_tracker_dir_print(&dev->refcnt_tracker, 10);
warning_time = jiffies;
}
}
@ -10154,6 +10156,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev = PTR_ALIGN(p, NETDEV_ALIGN);
dev->padded = (char *)dev - (char *)p;
ref_tracker_dir_init(&dev->refcnt_tracker, 128);
#ifdef CONFIG_PCPU_DEV_REFCNT
dev->pcpu_refcnt = alloc_percpu(int);
if (!dev->pcpu_refcnt)
@ -10270,6 +10273,7 @@ void free_netdev(struct net_device *dev)
list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
netif_napi_del(p);
ref_tracker_dir_exit(&dev->refcnt_tracker);
#ifdef CONFIG_PCPU_DEV_REFCNT
free_percpu(dev->pcpu_refcnt);
dev->pcpu_refcnt = NULL;


@ -313,6 +313,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data,
int err;
struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
const struct net_device_ops *ops;
netdevice_tracker dev_tracker;
if (!dev)
return -ENODEV;
@ -381,10 +382,10 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data,
return -ENODEV;
if (!netif_is_bridge_master(dev))
return -EOPNOTSUPP;
dev_hold(dev);
dev_hold_track(dev, &dev_tracker, GFP_KERNEL);
rtnl_unlock();
err = br_ioctl_call(net, netdev_priv(dev), cmd, ifr, NULL);
dev_put(dev);
dev_put_track(dev, &dev_tracker);
rtnl_lock();
return err;


@ -850,7 +850,7 @@ net_dm_hw_metadata_copy(const struct devlink_trap_metadata *metadata)
}
hw_metadata->input_dev = metadata->input_dev;
dev_hold(hw_metadata->input_dev);
dev_hold_track(hw_metadata->input_dev, &hw_metadata->dev_tracker, GFP_ATOMIC);
return hw_metadata;
@ -864,9 +864,9 @@ free_hw_metadata:
}
static void
net_dm_hw_metadata_free(const struct devlink_trap_metadata *hw_metadata)
net_dm_hw_metadata_free(struct devlink_trap_metadata *hw_metadata)
{
dev_put(hw_metadata->input_dev);
dev_put_track(hw_metadata->input_dev, &hw_metadata->dev_tracker);
kfree(hw_metadata->fa_cookie);
kfree(hw_metadata->trap_name);
kfree(hw_metadata->trap_group_name);


@ -49,7 +49,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
unsigned short flags)
{
dst->dev = dev;
dev_hold(dev);
dev_hold_track(dev, &dst->dev_tracker, GFP_ATOMIC);
dst->ops = ops;
dst_init_metrics(dst, dst_default_metrics.metrics, true);
dst->expires = 0UL;
@ -117,7 +117,7 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
if (dst->ops->destroy)
dst->ops->destroy(dst);
dev_put(dst->dev);
dev_put_track(dst->dev, &dst->dev_tracker);
lwtstate_put(dst->lwtstate);
@ -159,8 +159,8 @@ void dst_dev_put(struct dst_entry *dst)
dst->input = dst_discard;
dst->output = dst_discard_out;
dst->dev = blackhole_netdev;
dev_hold(dst->dev);
dev_put(dev);
dev_replace_track(dev, blackhole_netdev, &dst->dev_tracker,
GFP_ATOMIC);
}
EXPORT_SYMBOL(dst_dev_put);


@ -252,7 +252,7 @@ struct failover *failover_register(struct net_device *dev,
return ERR_PTR(-ENOMEM);
rcu_assign_pointer(failover->ops, ops);
dev_hold(dev);
dev_hold_track(dev, &failover->dev_tracker, GFP_KERNEL);
dev->priv_flags |= IFF_FAILOVER;
rcu_assign_pointer(failover->failover_dev, dev);
@ -285,7 +285,7 @@ void failover_unregister(struct failover *failover)
failover_dev->name);
failover_dev->priv_flags &= ~IFF_FAILOVER;
dev_put(failover_dev);
dev_put_track(failover_dev, &failover->dev_tracker);
spin_lock(&failover_lock);
list_del(&failover->list);


@ -109,7 +109,7 @@ static void linkwatch_add_event(struct net_device *dev)
spin_lock_irqsave(&lweventlist_lock, flags);
if (list_empty(&dev->link_watch_list)) {
list_add_tail(&dev->link_watch_list, &lweventlist);
dev_hold(dev);
dev_hold_track(dev, &dev->linkwatch_dev_tracker, GFP_ATOMIC);
}
spin_unlock_irqrestore(&lweventlist_lock, flags);
}
@ -166,7 +166,7 @@ static void linkwatch_do_dev(struct net_device *dev)
netdev_state_change(dev);
}
dev_put(dev);
dev_put_track(dev, &dev->linkwatch_dev_tracker);
}
static void __linkwatch_run_queue(int urgent_only)


@ -624,7 +624,7 @@ ___neigh_create(struct neigh_table *tbl, const void *pkey,
memcpy(n->primary_key, pkey, key_len);
n->dev = dev;
dev_hold(dev);
dev_hold_track(dev, &n->dev_tracker, GFP_ATOMIC);
/* Protocol specific setup. */
if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
@ -771,10 +771,10 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
write_pnet(&n->net, net);
memcpy(n->key, pkey, key_len);
n->dev = dev;
dev_hold(dev);
dev_hold_track(dev, &n->dev_tracker, GFP_KERNEL);
if (tbl->pconstructor && tbl->pconstructor(n)) {
dev_put(dev);
dev_put_track(dev, &n->dev_tracker);
kfree(n);
n = NULL;
goto out;
@ -806,7 +806,7 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
write_unlock_bh(&tbl->lock);
if (tbl->pdestructor)
tbl->pdestructor(n);
dev_put(n->dev);
dev_put_track(n->dev, &n->dev_tracker);
kfree(n);
return 0;
}
@ -839,7 +839,7 @@ static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
n->next = NULL;
if (tbl->pdestructor)
tbl->pdestructor(n);
dev_put(n->dev);
dev_put_track(n->dev, &n->dev_tracker);
kfree(n);
}
return -ENOENT;
@ -880,7 +880,7 @@ void neigh_destroy(struct neighbour *neigh)
if (dev->netdev_ops->ndo_neigh_destroy)
dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
dev_put(dev);
dev_put_track(dev, &neigh->dev_tracker);
neigh_parms_put(neigh->parms);
neigh_dbg(2, "neigh %p is destroyed\n", neigh);
@ -1666,13 +1666,13 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
refcount_set(&p->refcnt, 1);
p->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
dev_hold(dev);
dev_hold_track(dev, &p->dev_tracker, GFP_KERNEL);
p->dev = dev;
write_pnet(&p->net, net);
p->sysctl_table = NULL;
if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
dev_put(dev);
dev_put_track(dev, &p->dev_tracker);
kfree(p);
return NULL;
}
@ -1703,7 +1703,7 @@ void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
list_del(&parms->list);
parms->dead = 1;
write_unlock_bh(&tbl->lock);
dev_put(parms->dev);
dev_put_track(parms->dev, &parms->dev_tracker);
call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);


@ -1004,7 +1004,7 @@ static void rx_queue_release(struct kobject *kobj)
#endif
memset(kobj, 0, sizeof(*kobj));
dev_put(queue->dev);
dev_put_track(queue->dev, &queue->dev_tracker);
}
static const void *rx_queue_namespace(struct kobject *kobj)
@ -1044,7 +1044,7 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
/* Kobject_put later will trigger rx_queue_release call which
* decreases dev refcount: Take that reference here
*/
dev_hold(queue->dev);
dev_hold_track(queue->dev, &queue->dev_tracker, GFP_KERNEL);
kobj->kset = dev->queues_kset;
error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
@ -1607,7 +1607,7 @@ static void netdev_queue_release(struct kobject *kobj)
struct netdev_queue *queue = to_netdev_queue(kobj);
memset(kobj, 0, sizeof(*kobj));
dev_put(queue->dev);
dev_put_track(queue->dev, &queue->dev_tracker);
}
static const void *netdev_queue_namespace(struct kobject *kobj)
@ -1647,7 +1647,7 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
/* Kobject_put later will trigger netdev_queue_release call
* which decreases dev refcount: Take that reference here
*/
dev_hold(queue->dev);
dev_hold_track(queue->dev, &queue->dev_tracker, GFP_KERNEL);
kobj->kset = dev->queues_kset;
error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,


@ -776,7 +776,7 @@ put_noaddr:
err = __netpoll_setup(np, ndev);
if (err)
goto put;
netdev_tracker_alloc(ndev, &np->dev_tracker, GFP_KERNEL);
rtnl_unlock();
return 0;
@ -853,7 +853,7 @@ void netpoll_cleanup(struct netpoll *np)
if (!np->dev)
goto out;
__netpoll_cleanup(np);
dev_put(np->dev);
dev_put_track(np->dev, &np->dev_tracker);
np->dev = NULL;
out:
rtnl_unlock();


@ -1989,6 +1989,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
struct ethtool_value id;
static bool busy;
const struct ethtool_ops *ops = dev->ethtool_ops;
netdevice_tracker dev_tracker;
int rc;
if (!ops->set_phys_id)
@ -2008,7 +2009,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
* removal of the device.
*/
busy = true;
dev_hold(dev);
dev_hold_track(dev, &dev_tracker, GFP_KERNEL);
rtnl_unlock();
if (rc == 0) {
@ -2032,7 +2033,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
}
rtnl_lock();
dev_put(dev);
dev_put_track(dev, &dev_tracker);
busy = false;
(void) ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);


@ -243,7 +243,7 @@ void in_dev_finish_destroy(struct in_device *idev)
#ifdef NET_REFCNT_DEBUG
pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
#endif
dev_put(dev);
dev_put_track(dev, &idev->dev_tracker);
if (!idev->dead)
pr_err("Freeing alive in_device %p\n", idev);
else
@ -271,7 +271,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
dev_disable_lro(dev);
/* Reference in_dev->dev */
dev_hold(dev);
dev_hold_track(dev, &in_dev->dev_tracker, GFP_KERNEL);
/* Account for reference dev->ip_ptr (below) */
refcount_set(&in_dev->refcnt, 1);


@ -696,7 +696,7 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
unregister_netdevice_queue(dev, head);
dev_put(dev);
dev_put_track(dev, &v->dev_tracker);
return 0;
}
@ -896,6 +896,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
/* And finish update writing critical data */
write_lock_bh(&mrt_lock);
v->dev = dev;
netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC);
if (v->flags & VIFF_REGISTER)
mrt->mroute_reg_vif_num = vifi;
if (vifi+1 > mrt->maxvif)


@ -1531,8 +1531,9 @@ void rt_flush_dev(struct net_device *dev)
if (rt->dst.dev != dev)
continue;
rt->dst.dev = blackhole_netdev;
dev_hold(rt->dst.dev);
dev_put(dev);
dev_replace_track(dev, blackhole_netdev,
&rt->dst.dev_tracker,
GFP_ATOMIC);
}
spin_unlock_bh(&ul->lock);
}
@ -2819,7 +2820,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
new->output = dst_discard_out;
new->dev = net->loopback_dev;
dev_hold(new->dev);
dev_hold_track(new->dev, &new->dev_tracker, GFP_ATOMIC);
rt->rt_is_input = ort->rt_is_input;
rt->rt_iif = ort->rt_iif;


@ -405,13 +405,13 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
if (ndev->cnf.forwarding)
dev_disable_lro(dev);
/* We refer to the device */
dev_hold(dev);
dev_hold_track(dev, &ndev->dev_tracker, GFP_KERNEL);
if (snmp6_alloc_dev(ndev) < 0) {
netdev_dbg(dev, "%s: cannot allocate memory for statistics\n",
__func__);
neigh_parms_release(&nd_tbl, ndev->nd_parms);
dev_put(dev);
dev_put_track(dev, &ndev->dev_tracker);
kfree(ndev);
return ERR_PTR(err);
}


@ -263,7 +263,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
#ifdef NET_REFCNT_DEBUG
pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL");
#endif
dev_put(dev);
dev_put_track(dev, &idev->dev_tracker);
if (!idev->dead) {
pr_warn("Freeing alive inet6 device %p\n", idev);
return;


@ -403,7 +403,7 @@ static void ip6erspan_tunnel_uninit(struct net_device *dev)
ip6erspan_tunnel_unlink_md(ign, t);
ip6gre_tunnel_unlink(ign, t);
dst_cache_reset(&t->dst_cache);
dev_put(dev);
dev_put_track(dev, &t->dev_tracker);
}
static void ip6gre_tunnel_uninit(struct net_device *dev)
@ -416,7 +416,7 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
if (ign->fb_tunnel_dev == dev)
WRITE_ONCE(ign->fb_tunnel_dev, NULL);
dst_cache_reset(&t->dst_cache);
dev_put(dev);
dev_put_track(dev, &t->dev_tracker);
}
@ -1496,7 +1496,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
}
ip6gre_tnl_init_features(dev);
dev_hold(dev);
dev_hold_track(dev, &tunnel->dev_tracker, GFP_KERNEL);
return 0;
cleanup_dst_cache_init:
@ -1888,7 +1888,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
ip6erspan_tnl_link_config(tunnel, 1);
dev_hold(dev);
dev_hold_track(dev, &tunnel->dev_tracker, GFP_KERNEL);
return 0;
cleanup_dst_cache_init:


@ -383,7 +383,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
else
ip6_tnl_unlink(ip6n, t);
dst_cache_reset(&t->dst_cache);
dev_put(dev);
dev_put_track(dev, &t->dev_tracker);
}
/**
@ -1883,7 +1883,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
dev_hold(dev);
dev_hold_track(dev, &t->dev_tracker, GFP_KERNEL);
return 0;
destroy_dst:


@ -293,7 +293,7 @@ static void vti6_dev_uninit(struct net_device *dev)
RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
else
vti6_tnl_unlink(ip6n, t);
dev_put(dev);
dev_put_track(dev, &t->dev_tracker);
}
static int vti6_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi,
@ -934,7 +934,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
dev_hold(dev);
dev_hold_track(dev, &t->dev_tracker, GFP_KERNEL);
return 0;
}


@ -746,7 +746,7 @@ static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
if ((v->flags & MIFF_REGISTER) && !notify)
unregister_netdevice_queue(dev, head);
dev_put(dev);
dev_put_track(dev, &v->dev_tracker);
return 0;
}
@ -919,6 +919,7 @@ static int mif6_add(struct net *net, struct mr_table *mrt,
/* And finish update writing critical data */
write_lock_bh(&mrt_lock);
v->dev = dev;
netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC);
#ifdef CONFIG_IPV6_PIMSM_V2
if (v->flags & MIFF_REGISTER)
mrt->mroute_reg_vif_num = vifi;


@ -182,8 +182,9 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
if (rt_dev == dev) {
rt->dst.dev = blackhole_netdev;
dev_hold(rt->dst.dev);
dev_put(rt_dev);
dev_replace_track(rt_dev, blackhole_netdev,
&rt->dst.dev_tracker,
GFP_ATOMIC);
}
}
spin_unlock_bh(&ul->lock);
@ -592,6 +593,7 @@ struct __rt6_probe_work {
struct work_struct work;
struct in6_addr target;
struct net_device *dev;
netdevice_tracker dev_tracker;
};
static void rt6_probe_deferred(struct work_struct *w)
@ -602,7 +604,7 @@ static void rt6_probe_deferred(struct work_struct *w)
addrconf_addr_solict_mult(&work->target, &mcaddr);
ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
dev_put(work->dev);
dev_put_track(work->dev, &work->dev_tracker);
kfree(work);
}
@ -656,7 +658,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
} else {
INIT_WORK(&work->work, rt6_probe_deferred);
work->target = *nh_gw;
dev_hold(dev);
dev_hold_track(dev, &work->dev_tracker, GFP_KERNEL);
work->dev = dev;
schedule_work(&work->work);
}


@ -521,7 +521,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
ipip6_tunnel_del_prl(tunnel, NULL);
}
dst_cache_reset(&tunnel->dst_cache);
dev_put(dev);
dev_put_track(dev, &tunnel->dev_tracker);
}
static int ipip6_err(struct sk_buff *skb, u32 info)
@ -1463,7 +1463,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
dev->tstats = NULL;
return err;
}
dev_hold(dev);
dev_hold_track(dev, &tunnel->dev_tracker, GFP_KERNEL);
return 0;
}


@ -973,7 +973,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev_queue = dev_queue;
dev_hold(dev);
dev_hold_track(dev, &sch->dev_tracker, GFP_KERNEL);
refcount_set(&sch->refcnt, 1);
return sch;
@ -1073,7 +1073,7 @@ static void qdisc_destroy(struct Qdisc *qdisc)
ops->destroy(qdisc);
module_put(ops->owner);
dev_put(qdisc_dev(qdisc));
dev_put_track(qdisc_dev(qdisc), &qdisc->dev_tracker);
trace_qdisc_destroy(qdisc);