/*
 * kernel/lockdep_proc.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Code for /proc/lockdep and /proc/lockdep_stats:
 *
 */
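/*
 * Depending on the configuration this file also provides
 * /proc/lockdep_chains (CONFIG_PROVE_LOCKING) and /proc/lock_stat
 * (CONFIG_LOCK_STAT); see lockdep_proc_init() at the bottom.
 */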
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <asm/uaccess.h>
#include <asm/div64.h>

#include "lockdep_internals.h"
static void *l_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct lock_class *class;

        (*pos)++;

        if (v == SEQ_START_TOKEN)
                class = m->private;
        else {
                class = v;

                if (class->lock_entry.next != &all_lock_classes)
                        class = list_entry(class->lock_entry.next,
                                           struct lock_class, lock_entry);
                else
                        class = NULL;
        }

        return class;
}
static void *l_start(struct seq_file *m, loff_t *pos)
{
        struct lock_class *class;
        loff_t i = 0;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        list_for_each_entry(class, &all_lock_classes, lock_entry) {
                if (++i == *pos)
                        return class;
        }
        return NULL;
}

static void l_stop(struct seq_file *m, void *v)
{
}
static void print_name(struct seq_file *m, struct lock_class *class)
{
        char str[128];
        const char *name = class->name;

        if (!name) {
                name = __get_key_name(class->key, str);
                seq_printf(m, "%s", name);
        } else {
                seq_printf(m, "%s", name);
                if (class->name_version > 1)
                        seq_printf(m, "#%d", class->name_version);
                if (class->subclass)
                        seq_printf(m, "/%d", class->subclass);
        }
}
static int l_show(struct seq_file *m, void *v)
{
        struct lock_class *class = v;
        struct lock_list *entry;
        char c1, c2, c3, c4, c5, c6;
        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "all lock classes:\n");
                return 0;
        }

        seq_printf(m, "%p", class->key);
#ifdef CONFIG_DEBUG_LOCKDEP
        seq_printf(m, " OPS:%8ld", class->ops);
#endif
#ifdef CONFIG_PROVE_LOCKING
        seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
        seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
#endif
        get_usage_chars(class, &c1, &c2, &c3, &c4, &c5, &c6);

        seq_printf(m, " %c%c%c%c%c%c", c1, c2, c3, c4, c5, c6);

        seq_printf(m, ": ");
        print_name(m, class);
        seq_puts(m, "\n");

        list_for_each_entry(entry, &class->locks_after, entry) {
                if (entry->distance == 1) {
                        seq_printf(m, " -> [%p] ", entry->class->key);
                        print_name(m, entry->class);
                        seq_puts(m, "\n");
                }
        }
        seq_puts(m, "\n");

        return 0;
}
static const struct seq_operations lockdep_ops = {
        .start  = l_start,
        .next   = l_next,
        .stop   = l_stop,
        .show   = l_show,
};

static int lockdep_open(struct inode *inode, struct file *file)
{
        int res = seq_open(file, &lockdep_ops);
        if (!res) {
                struct seq_file *m = file->private_data;

                if (!list_empty(&all_lock_classes))
                        m->private = list_entry(all_lock_classes.next,
                                        struct lock_class, lock_entry);
                else
                        m->private = NULL;
        }
        return res;
}

static const struct file_operations proc_lockdep_operations = {
        .open           = lockdep_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};
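/*
 * /proc/lockdep_chains dumps every recorded lock chain: the irq context it
 * was observed in, followed by the lock classes held along that chain.
 */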
#ifdef CONFIG_PROVE_LOCKING
static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct lock_chain *chain;

        (*pos)++;

        if (v == SEQ_START_TOKEN)
                chain = m->private;
        else {
                chain = v;

                if (*pos < nr_lock_chains)
                        chain = lock_chains + *pos;
                else
                        chain = NULL;
        }

        return chain;
}

static void *lc_start(struct seq_file *m, loff_t *pos)
{
        if (*pos == 0)
                return SEQ_START_TOKEN;

        if (*pos < nr_lock_chains)
                return lock_chains + *pos;

        return NULL;
}

static void lc_stop(struct seq_file *m, void *v)
{
}

static int lc_show(struct seq_file *m, void *v)
{
        struct lock_chain *chain = v;
        struct lock_class *class;
        int i;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "all lock chains:\n");
                return 0;
        }

        seq_printf(m, "irq_context: %d\n", chain->irq_context);

        for (i = 0; i < chain->depth; i++) {
                class = lock_chain_get_class(chain, i);
                if (!class->key)
                        continue;

                seq_printf(m, "[%p] ", class->key);
                print_name(m, class);
                seq_puts(m, "\n");
        }
        seq_puts(m, "\n");

        return 0;
}

static const struct seq_operations lockdep_chains_ops = {
        .start  = lc_start,
        .next   = lc_next,
        .stop   = lc_stop,
        .show   = lc_show,
};

static int lockdep_chains_open(struct inode *inode, struct file *file)
{
        int res = seq_open(file, &lockdep_chains_ops);
        if (!res) {
                struct seq_file *m = file->private_data;

                if (nr_lock_chains)
                        m->private = lock_chains;
                else
                        m->private = NULL;
        }
        return res;
}

static const struct file_operations proc_lockdep_chains_operations = {
        .open           = lockdep_chains_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};
#endif /* CONFIG_PROVE_LOCKING */
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
        unsigned int hi1 = debug_atomic_read(&hardirqs_on_events),
                     hi2 = debug_atomic_read(&hardirqs_off_events),
                     hr1 = debug_atomic_read(&redundant_hardirqs_on),
                     hr2 = debug_atomic_read(&redundant_hardirqs_off),
                     si1 = debug_atomic_read(&softirqs_on_events),
                     si2 = debug_atomic_read(&softirqs_off_events),
                     sr1 = debug_atomic_read(&redundant_softirqs_on),
                     sr2 = debug_atomic_read(&redundant_softirqs_off);

        seq_printf(m, " chain lookup misses:           %11u\n",
                debug_atomic_read(&chain_lookup_misses));
        seq_printf(m, " chain lookup hits:             %11u\n",
                debug_atomic_read(&chain_lookup_hits));
        seq_printf(m, " cyclic checks:                 %11u\n",
                debug_atomic_read(&nr_cyclic_checks));
        seq_printf(m, " cyclic-check recursions:       %11u\n",
                debug_atomic_read(&nr_cyclic_check_recursions));
        seq_printf(m, " find-mask forwards checks:     %11u\n",
                debug_atomic_read(&nr_find_usage_forwards_checks));
        seq_printf(m, " find-mask forwards recursions: %11u\n",
                debug_atomic_read(&nr_find_usage_forwards_recursions));
        seq_printf(m, " find-mask backwards checks:    %11u\n",
                debug_atomic_read(&nr_find_usage_backwards_checks));
        seq_printf(m, " find-mask backwards recursions:%11u\n",
                debug_atomic_read(&nr_find_usage_backwards_recursions));

        seq_printf(m, " hardirq on events:             %11u\n", hi1);
        seq_printf(m, " hardirq off events:            %11u\n", hi2);
        seq_printf(m, " redundant hardirq ons:         %11u\n", hr1);
        seq_printf(m, " redundant hardirq offs:        %11u\n", hr2);
        seq_printf(m, " softirq on events:             %11u\n", si1);
        seq_printf(m, " softirq off events:            %11u\n", si2);
        seq_printf(m, " redundant softirq ons:         %11u\n", sr1);
        seq_printf(m, " redundant softirq offs:        %11u\n", sr2);
#endif
}
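/*
 * /proc/lockdep_stats: a one-shot summary of the validator state -- how many
 * classes, dependencies, chains and stack-trace entries are in use versus
 * their compile-time maximums, plus a per-usage-bit breakdown of all classes.
 */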
static int lockdep_stats_show(struct seq_file *m, void *v)
{
        struct lock_class *class;
        unsigned long nr_unused = 0, nr_uncategorized = 0,
                      nr_irq_safe = 0, nr_irq_unsafe = 0,
                      nr_softirq_safe = 0, nr_softirq_unsafe = 0,
                      nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
                      nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
                      nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
                      nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
                      sum_forward_deps = 0, factor = 0;

        list_for_each_entry(class, &all_lock_classes, lock_entry) {

                if (class->usage_mask == 0)
                        nr_unused++;
                if (class->usage_mask == LOCKF_USED)
                        nr_uncategorized++;
                if (class->usage_mask & LOCKF_USED_IN_IRQ)
                        nr_irq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_IRQ)
                        nr_irq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
                        nr_softirq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
                        nr_softirq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
                        nr_hardirq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
                        nr_hardirq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
                        nr_irq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
                        nr_irq_read_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
                        nr_softirq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
                        nr_softirq_read_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
                        nr_hardirq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
                        nr_hardirq_read_unsafe++;

#ifdef CONFIG_PROVE_LOCKING
                sum_forward_deps += lockdep_count_forward_deps(class);
#endif
        }
#ifdef CONFIG_DEBUG_LOCKDEP
        DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
#endif
seq_printf ( m , " lock-classes: %11lu [max: %lu] \n " ,
nr_lock_classes , MAX_LOCKDEP_KEYS ) ;
seq_printf ( m , " direct dependencies: %11lu [max: %lu] \n " ,
nr_list_entries , MAX_LOCKDEP_ENTRIES ) ;
seq_printf ( m , " indirect dependencies: %11lu \n " ,
sum_forward_deps ) ;
/*
* Total number of dependencies :
*
* All irq - safe locks may nest inside irq - unsafe locks ,
* plus all the other known dependencies :
*/
seq_printf ( m , " all direct dependencies: %11lu \n " ,
nr_irq_unsafe * nr_irq_safe +
nr_hardirq_unsafe * nr_hardirq_safe +
nr_list_entries ) ;
/*
* Estimated factor between direct and indirect
* dependencies :
*/
if ( nr_list_entries )
factor = sum_forward_deps / nr_list_entries ;
2007-07-19 12:48:54 +04:00
# ifdef CONFIG_PROVE_LOCKING
2006-07-03 11:24:52 +04:00
seq_printf ( m , " dependency chains: %11lu [max: %lu] \n " ,
nr_lock_chains , MAX_LOCKDEP_CHAINS ) ;
2008-06-20 12:39:21 +04:00
seq_printf ( m , " dependency chain hlocks: %11d [max: %lu] \n " ,
2008-06-23 07:20:54 +04:00
nr_chain_hlocks , MAX_LOCKDEP_CHAIN_HLOCKS ) ;
2007-07-19 12:48:54 +04:00
# endif
2006-07-03 11:24:52 +04:00
# ifdef CONFIG_TRACE_IRQFLAGS
seq_printf ( m , " in-hardirq chains: %11u \n " ,
nr_hardirq_chains ) ;
seq_printf ( m , " in-softirq chains: %11u \n " ,
nr_softirq_chains ) ;
# endif
seq_printf ( m , " in-process chains: %11u \n " ,
nr_process_chains ) ;
seq_printf ( m , " stack-trace entries: %11lu [max: %lu] \n " ,
nr_stack_trace_entries , MAX_STACK_TRACE_ENTRIES ) ;
seq_printf ( m , " combined max dependencies: %11u \n " ,
( nr_hardirq_chains + 1 ) *
( nr_softirq_chains + 1 ) *
( nr_process_chains + 1 )
) ;
seq_printf ( m , " hardirq-safe locks: %11lu \n " ,
nr_hardirq_safe ) ;
seq_printf ( m , " hardirq-unsafe locks: %11lu \n " ,
nr_hardirq_unsafe ) ;
seq_printf ( m , " softirq-safe locks: %11lu \n " ,
nr_softirq_safe ) ;
seq_printf ( m , " softirq-unsafe locks: %11lu \n " ,
nr_softirq_unsafe ) ;
seq_printf ( m , " irq-safe locks: %11lu \n " ,
nr_irq_safe ) ;
seq_printf ( m , " irq-unsafe locks: %11lu \n " ,
nr_irq_unsafe ) ;
seq_printf ( m , " hardirq-read-safe locks: %11lu \n " ,
nr_hardirq_read_safe ) ;
seq_printf ( m , " hardirq-read-unsafe locks: %11lu \n " ,
nr_hardirq_read_unsafe ) ;
seq_printf ( m , " softirq-read-safe locks: %11lu \n " ,
nr_softirq_read_safe ) ;
seq_printf ( m , " softirq-read-unsafe locks: %11lu \n " ,
nr_softirq_read_unsafe ) ;
seq_printf ( m , " irq-read-safe locks: %11lu \n " ,
nr_irq_read_safe ) ;
seq_printf ( m , " irq-read-unsafe locks: %11lu \n " ,
nr_irq_read_unsafe ) ;
seq_printf ( m , " uncategorized locks: %11lu \n " ,
nr_uncategorized ) ;
seq_printf ( m , " unused locks: %11lu \n " ,
nr_unused ) ;
seq_printf ( m , " max locking depth: %11u \n " ,
max_lockdep_depth ) ;
seq_printf ( m , " max recursion depth: %11u \n " ,
max_recursion_depth ) ;
lockdep_stats_debug_show ( m ) ;
seq_printf ( m , " debug_locks: %11u \n " ,
debug_locks ) ;
return 0 ;
}
static int lockdep_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, lockdep_stats_show, NULL);
}

static const struct file_operations proc_lockdep_stats_operations = {
        .open           = lockdep_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
#ifdef CONFIG_LOCK_STAT

struct lock_stat_data {
        struct lock_class *class;
        struct lock_class_stats stats;
};

struct lock_stat_seq {
        struct lock_stat_data *iter;
        struct lock_stat_data *iter_end;
        struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
};

/*
 * sort on absolute number of contentions
 */
static int lock_stat_cmp(const void *l, const void *r)
{
        const struct lock_stat_data *dl = l, *dr = r;
        unsigned long nl, nr;

        nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
        nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;

        return nr - nl;
}
static void seq_line(struct seq_file *m, char c, int offset, int length)
{
        int i;

        for (i = 0; i < offset; i++)
                seq_puts(m, " ");
        for (i = 0; i < length; i++)
                seq_printf(m, "%c", c);
        seq_puts(m, "\n");
}
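/*
 * snprint_time() scales a time value down by 1000 and prints it with two
 * fractional digits; the "nr += 5" pre-rounds the last displayed digit.
 * (The inputs are scheduler-clock deltas, i.e. nanoseconds, so the
 * /proc/lock_stat time columns read as microseconds.)
 */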
static void snprint_time(char *buf, size_t bufsiz, s64 nr)
{
        s64 div;
        s32 rem;

        nr += 5; /* for display rounding */
        div = div_s64_rem(nr, 1000, &rem);
        snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
}

static void seq_time(struct seq_file *m, s64 time)
{
        char num[15];

        snprint_time(num, sizeof(num), time);
        seq_printf(m, " %14s", num);
}
static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
{
        seq_printf(m, "%14lu", lt->nr);
        seq_time(m, lt->min);
        seq_time(m, lt->max);
        seq_time(m, lt->total);
}
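/*
 * Print one lock class: a "-W" (or plain) line for write holds and, if the
 * class was also read-held, a separate "-R" line, followed by the recorded
 * contention and contending call sites.
 */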
static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
{
        char name[39];
        struct lock_class *class;
        struct lock_class_stats *stats;
        int i, namelen;

        class = data->class;
        stats = &data->stats;

        namelen = 38;
        if (class->name_version > 1)
                namelen -= 2; /* XXX truncates versions > 9 */
        if (class->subclass)
                namelen -= 2;

        if (!class->name) {
                char str[KSYM_NAME_LEN];
                const char *key_name;

                key_name = __get_key_name(class->key, str);
                snprintf(name, namelen, "%s", key_name);
        } else {
                snprintf(name, namelen, "%s", class->name);
        }
        namelen = strlen(name);
        if (class->name_version > 1) {
                snprintf(name+namelen, 3, "#%d", class->name_version);
                namelen += 2;
        }
        if (class->subclass) {
                snprintf(name+namelen, 3, "/%d", class->subclass);
                namelen += 2;
        }

        if (stats->write_holdtime.nr) {
                if (stats->read_holdtime.nr)
                        seq_printf(m, "%38s-W:", name);
                else
                        seq_printf(m, "%40s:", name);

                seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
                seq_lock_time(m, &stats->write_waittime);
                seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
                seq_lock_time(m, &stats->write_holdtime);
                seq_puts(m, "\n");
        }

        if (stats->read_holdtime.nr) {
                seq_printf(m, "%38s-R:", name);
                seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
                seq_lock_time(m, &stats->read_waittime);
                seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
                seq_lock_time(m, &stats->read_holdtime);
                seq_puts(m, "\n");
        }

        if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
                return;

        if (stats->read_holdtime.nr)
                namelen += 2;

        for (i = 0; i < LOCKSTAT_POINTS; i++) {
                char sym[KSYM_SYMBOL_LEN];
                char ip[32];

                if (class->contention_point[i] == 0)
                        break;

                if (!i)
                        seq_line(m, '-', 40-namelen, namelen);

                sprint_symbol(sym, class->contention_point[i]);
                snprintf(ip, sizeof(ip), "[<%p>]",
                                (void *)class->contention_point[i]);
                seq_printf(m, "%40s %14lu %29s %s\n", name,
                                stats->contention_point[i],
                                ip, sym);
        }

        for (i = 0; i < LOCKSTAT_POINTS; i++) {
                char sym[KSYM_SYMBOL_LEN];
                char ip[32];

                if (class->contending_point[i] == 0)
                        break;

                if (!i)
                        seq_line(m, '-', 40-namelen, namelen);

                sprint_symbol(sym, class->contending_point[i]);
                snprintf(ip, sizeof(ip), "[<%p>]",
                                (void *)class->contending_point[i]);
                seq_printf(m, "%40s %14lu %29s %s\n", name,
                                stats->contending_point[i],
                                ip, sym);
        }
        if (i) {
                seq_puts(m, "\n");
                seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1));
                seq_puts(m, "\n");
        }
}
static void seq_header(struct seq_file *m)
{
        seq_printf(m, "lock_stat version 0.3\n");
        seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
        seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
                        "%14s %14s\n",
                        "class name",
                        "con-bounces",
                        "contentions",
                        "waittime-min",
                        "waittime-max",
                        "waittime-total",
                        "acq-bounces",
                        "acquisitions",
                        "holdtime-min",
                        "holdtime-max",
                        "holdtime-total");
        seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
        seq_printf(m, "\n");
}
static void *ls_start(struct seq_file *m, loff_t *pos)
{
        struct lock_stat_seq *data = m->private;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        data->iter = data->stats + *pos;
        if (data->iter >= data->iter_end)
                data->iter = NULL;

        return data->iter;
}
static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct lock_stat_seq *data = m->private;

        (*pos)++;

        if (v == SEQ_START_TOKEN)
                data->iter = data->stats;
        else {
                data->iter = v;
                data->iter++;
        }

        if (data->iter == data->iter_end)
                data->iter = NULL;

        return data->iter;
}

static void ls_stop(struct seq_file *m, void *v)
{
}

static int ls_show(struct seq_file *m, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_header(m);
        else
                seq_stats(m, v);

        return 0;
}

static struct seq_operations lockstat_ops = {
        .start  = ls_start,
        .next   = ls_next,
        .stop   = ls_stop,
        .show   = ls_show,
};
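/*
 * Opening /proc/lock_stat copies the current statistics of every lock class
 * into a vmalloc()ed lock_stat_seq and sorts that snapshot by contention
 * count, so a single read pass sees a consistent, ordered view.
 */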
static int lock_stat_open(struct inode *inode, struct file *file)
{
        int res;
        struct lock_class *class;
        struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));

        if (!data)
                return -ENOMEM;

        res = seq_open(file, &lockstat_ops);
        if (!res) {
                struct lock_stat_data *iter = data->stats;
                struct seq_file *m = file->private_data;

                data->iter = iter;
                list_for_each_entry(class, &all_lock_classes, lock_entry) {
                        iter->class = class;
                        iter->stats = lock_stats(class);
                        iter++;
                }
                data->iter_end = iter;

                sort(data->stats, data->iter_end - data->iter,
                                sizeof(struct lock_stat_data),
                                lock_stat_cmp, NULL);

                m->private = data;
        } else
                vfree(data);

        return res;
}
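/*
 * Writing "0" to /proc/lock_stat clears the accumulated statistics of every
 * lock class; any other input is accepted but ignored.
 */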
static ssize_t lock_stat_write(struct file *file, const char __user *buf,
                               size_t count, loff_t *ppos)
{
        struct lock_class *class;
        char c;

        if (count) {
                if (get_user(c, buf))
                        return -EFAULT;

                if (c != '0')
                        return count;

                list_for_each_entry(class, &all_lock_classes, lock_entry)
                        clear_lock_stats(class);
        }
        return count;
}

static int lock_stat_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;

        vfree(seq->private);
        seq->private = NULL;
        return seq_release(inode, file);
}

static const struct file_operations proc_lock_stat_operations = {
        .open           = lock_stat_open,
        .write          = lock_stat_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = lock_stat_release,
};
#endif /* CONFIG_LOCK_STAT */
static int __init lockdep_proc_init(void)
{
        proc_create("lockdep", S_IRUSR, NULL, &proc_lockdep_operations);
#ifdef CONFIG_PROVE_LOCKING
        proc_create("lockdep_chains", S_IRUSR, NULL,
                    &proc_lockdep_chains_operations);
#endif
        proc_create("lockdep_stats", S_IRUSR, NULL,
                    &proc_lockdep_stats_operations);

#ifdef CONFIG_LOCK_STAT
        proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations);
#endif

        return 0;
}
__initcall(lockdep_proc_init);