// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * We limit the freeing of debug objects via workqueue at a maximum
 * frequency of 10Hz and about 1024 objects for each freeing operation.
 * So it is freeing at most 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
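
/*
 * Worked example of the rate limit above (illustrative; assuming HZ=250):
 * ODEBUG_FREE_WORK_DELAY = DIV_ROUND_UP(250, 10) = 25 jiffies, i.e. 100ms,
 * so the delayed work runs at most 10 times per second, and with up to
 * ODEBUG_FREE_WORK_MAX = 1024 objects per run the free rate is capped at
 * roughly 10 * 1024 ~= 10k objects per second.
 */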

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
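
/*
 * Concrete example of the skew described above (numbers are illustrative):
 * if two CPUs each hold 5 objects in their percpu free lists, obj_pool_free
 * reads 10 lower and obj_pool_used 10 higher than the true totals;
 * debug_stats_show() compensates by adding/subtracting the summed percpu
 * obj_free counts.
 */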
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;

static const struct debug_obj_descr *descr_test  __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full; if not, fill the
	 * pool list from the global free list. As it is likely that a
	 * workload may be gearing up to use more and more objects, don't
	 * free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}
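
/*
 * Summary of the free path implemented above: a freed object first lands
 * on this CPU's percpu free list. Once that holds ODEBUG_POOL_PERCPU_SIZE
 * entries, a batch is pushed down to the global obj_pool; if obj_pool
 * already exceeds debug_objects_pool_size, the batch goes onto obj_to_free
 * instead, and the delayed work later hands those objects back to the
 * slab cache.
 */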

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	struct debug_percpu_free *percpu_pool;
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/* Remote access is safe as the CPU is dead already */
	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	percpu_pool->obj_free = 0;

	return 0;
}
#endif

/*
 * We run out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
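
/*
 * Illustrative example (assuming 4K pages, i.e. ODEBUG_CHUNK_SHIFT == 12):
 * the addresses 0xffff888012345008 and 0xffff888012345ff0 share the chunk
 * number 0xffff888012345 and therefore always hash to the same bucket,
 * which is what lets __debug_check_no_obj_freed() scan a freed memory
 * range chunk by chunk.
 */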

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	bool check_stack = false;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context:
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
		fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		check_stack = true;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		return;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (check_stack)
		debug_object_is_on_stack(addr, onstack);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
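
/*
 * Illustrative usage sketch; the "foo" names below are invented for this
 * comment and are not part of any kernel API. A subsystem declares one
 * struct debug_obj_descr for its object type and brackets the object's
 * lifetime with the debug_object_*() calls:
 *
 *	static const struct debug_obj_descr foo_debug_descr = {
 *		.name	= "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *	}
 *
 *	void foo_start(struct foo *f)
 *	{
 *		debug_object_activate(f, &foo_debug_descr);
 *	}
 *
 *	void foo_stop(struct foo *f)
 *	{
 *		debug_object_deactivate(f, &foo_debug_descr);
 *	}
 *
 *	void foo_teardown(struct foo *f)
 *	{
 *		debug_object_free(f, &foo_debug_descr);
 *	}
 */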

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		bool print_object = false;

		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_print_object(obj, "activate");
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		if (print_object)
			debug_print_object(obj, "activate");
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);

	/*
	 * We are here when a static object is activated. We
	 * let the type specific code confirm whether this is
	 * true or not. If true, we just make sure that the
	 * static object is tracked in the object tracker. If
	 * not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				print_object = true;
			break;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			break;
		default:
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	} else if (print_object) {
		debug_print_object(obj, "deactivate");
	}
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		print_object = true;
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (print_object)
		debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "free");
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				print_object = true;
			break;

		default:
			print_object = true;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	} else if (print_object) {
		debug_print_object(obj, "active_state");
	}
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
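
/*
 * Illustrative example; the STATE_* names are invented for this comment.
 * A caller can encode substates of an active object in obj->astate and
 * advance them atomically:
 *
 *	debug_object_active_state(obj, &descr, STATE_QUEUED, STATE_DONE);
 *
 * This moves astate from STATE_QUEUED to STATE_DONE only while the object
 * is ODEBUG_STATE_ACTIVE; any other combination ends up in
 * debug_print_object().
 */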

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	const struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(obj, "free");
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};
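
/*
 * Illustrative sketch (hypothetical names, not from this file): a real
 * subsystem declares its descriptor the same way and calls the
 * debug_object_*() helpers from its own lifecycle hooks, e.g.:
 *
 *	static bool foo_is_static_object(void *addr)
 *	{
 *		struct foo *f = addr;
 *
 *		return f->is_static;
 *	}
 *
 *	static const struct debug_obj_descr foo_debug_descr = {
 *		.name		  = "foo",
 *		.is_static_object = foo_is_static_object,
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *	}
 */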

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
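
/*
 * Illustrative sketch (mirrors the pattern check_results() above uses;
 * helper calls as they appear elsewhere in this file): once the
 * buckets are initialized, a tracked address is resolved to its bucket
 * and object under the bucket lock:
 *
 *	struct debug_bucket *db = get_bucket((unsigned long) addr);
 *	struct debug_obj *obj;
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&db->lock, flags);
 *	obj = lookup_object(addr, db);
 *	raw_spin_unlock_irqrestore(&db->lock, flags);
 */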

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * debug_objects_mem_init() is now called early, when only one CPU
	 * is up and interrupts have been disabled, so it is safe to replace
	 * the active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools
	 *
	 * Initialization is not strictly necessary, but was done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof(struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
	} else
		debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}
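
/*
 * Worked example (illustrative; the actual ODEBUG_BATCH_SIZE value is
 * defined earlier in this file): with 8 possible CPUs and a batch size
 * of 16, extras = 8 * 16 = 128, so both the pool size and the minimum
 * fill level grow by 128 objects.
 */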