/* builtin-lock.c: analyze lock events recorded by perf (based on kernel/lockdep.c) */
# include "builtin.h"
# include "perf.h"
# include "util/util.h"
# include "util/cache.h"
# include "util/symbol.h"
# include "util/thread.h"
# include "util/header.h"
# include "util/parse-options.h"
# include "util/trace-event.h"
# include "util/debug.h"
# include "util/session.h"
# include <sys/types.h>
# include <sys/prctl.h>
# include <semaphore.h>
# include <pthread.h>
# include <math.h>
# include <limits.h>
# include <linux/list.h>
# include <linux/hash.h>
2010-04-21 16:23:54 +04:00
static struct perf_session * session ;
2010-01-30 14:43:33 +03:00
/* based on kernel/lockdep.c */
# define LOCKHASH_BITS 12
# define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
static struct list_head lockhash_table [ LOCKHASH_SIZE ] ;
# define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
# define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
struct lock_stat {
2010-01-31 10:27:58 +03:00
struct list_head hash_entry ;
struct rb_node rb ; /* used for sorting */
2010-01-30 14:43:33 +03:00
2010-01-31 10:27:58 +03:00
/*
* FIXME : raw_field_value ( ) returns unsigned long long ,
2010-01-30 14:43:33 +03:00
* so address of lockdep_map should be dealed as 64 bit .
2010-01-31 10:27:58 +03:00
* Is there more better solution ?
*/
void * addr ; /* address of lockdep_map, used as ID */
char * name ; /* for strcpy(), we cannot use const */
2010-01-30 14:43:33 +03:00
2010-01-31 10:27:58 +03:00
unsigned int nr_acquire ;
2010-04-21 16:23:54 +04:00
unsigned int nr_acquired ;
2010-01-31 10:27:58 +03:00
unsigned int nr_contended ;
unsigned int nr_release ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
unsigned int nr_readlock ;
unsigned int nr_trylock ;
2010-01-30 14:43:33 +03:00
/* these times are in nano sec. */
2010-01-31 10:27:58 +03:00
u64 wait_time_total ;
u64 wait_time_min ;
u64 wait_time_max ;
2010-04-21 16:23:54 +04:00
int discard ; /* flag of blacklist */
} ;
/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting first event of acquire.
 * As the nature of lock events, there is no guarantee
 * that the first event for the locks are acquire,
 * it can be acquired, contended or release.
 */
#define SEQ_STATE_UNINITIALIZED	0	/* initial state */
#define SEQ_STATE_RELEASED	1
#define SEQ_STATE_ACQUIRING	2
#define SEQ_STATE_ACQUIRED	3
#define SEQ_STATE_READ_ACQUIRED	4
#define SEQ_STATE_CONTENDED	5

/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be synchronized?
 */
#define MAX_LOCK_DEPTH		48
/*
 * struct lock_seq_stat:
 * Place to put on state of one lock sequence
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
struct lock_seq_stat {
	struct list_head	list;		/* chained on thread_stat->seq_list */
	int			state;		/* one of SEQ_STATE_* */
	u64			prev_event_time;
	void			*addr;		/* lock instance this sequence tracks */

	int			read_count;	/* nesting depth of read acquisitions */
};
2010-04-21 16:23:54 +04:00
struct thread_stat {
struct rb_node rb ;
u32 tid ;
struct list_head seq_list ;
} ;
static struct rb_root thread_stats ;
/* Look up the thread_stat for @tid; NULL when not yet recorded. */
static struct thread_stat *thread_stat_find(u32 tid)
{
	struct rb_node *node = thread_stats.rb_node;

	/* plain binary search over the tid-ordered rbtree */
	while (node) {
		struct thread_stat *st =
			container_of(node, struct thread_stat, rb);

		if (st->tid == tid)
			return st;
		node = tid < st->tid ? node->rb_left : node->rb_right;
	}

	return NULL;
}
static void thread_stat_insert ( struct thread_stat * new )
{
struct rb_node * * rb = & thread_stats . rb_node ;
struct rb_node * parent = NULL ;
struct thread_stat * p ;
while ( * rb ) {
p = container_of ( * rb , struct thread_stat , rb ) ;
parent = * rb ;
if ( new - > tid < p - > tid )
rb = & ( * rb ) - > rb_left ;
else if ( new - > tid > p - > tid )
rb = & ( * rb ) - > rb_right ;
else
BUG_ON ( " inserting invalid thread_stat \n " ) ;
}
rb_link_node ( & new - > rb , parent , rb ) ;
rb_insert_color ( & new - > rb , & thread_stats ) ;
}
/* Find-or-create used once the tree is non-empty. Dies on OOM. */
static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
{
	struct thread_stat *st = thread_stat_find(tid);

	if (st)
		return st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st)
		die("memory allocation failed\n");

	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);
	thread_stat_insert(st);

	return st;
}
static struct thread_stat *thread_stat_findnew_first(u32 tid);

/*
 * Dispatch pointer: the very first call seeds the empty rbtree without
 * searching it, then swaps itself out for the searching variant.
 */
static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
	thread_stat_findnew_first;

static struct thread_stat *thread_stat_findnew_first(u32 tid)
{
	struct thread_stat *st = zalloc(sizeof(struct thread_stat));

	if (!st)
		die("memory allocation failed\n");

	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	/* tree is empty: link directly at the root */
	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
	rb_insert_color(&st->rb, &thread_stats);

	thread_stat_findnew = thread_stat_findnew_after_first;
	return st;
}
2010-01-30 14:43:33 +03:00
/* build simple key function one is bigger than two */
2010-01-31 10:27:58 +03:00
# define SINGLE_KEY(member) \
2010-01-30 14:43:33 +03:00
static int lock_stat_key_ # # member ( struct lock_stat * one , \
struct lock_stat * two ) \
{ \
return one - > member > two - > member ; \
}
SINGLE_KEY ( nr_acquired )
SINGLE_KEY ( nr_contended )
SINGLE_KEY ( wait_time_total )
SINGLE_KEY ( wait_time_min )
SINGLE_KEY ( wait_time_max )
struct lock_key {
/*
* name : the value for specify by user
* this should be simpler than raw name of member
* e . g . nr_acquired - > acquired , wait_time_total - > wait_total
*/
2010-01-31 10:27:58 +03:00
const char * name ;
int ( * key ) ( struct lock_stat * , struct lock_stat * ) ;
2010-01-30 14:43:33 +03:00
} ;
2010-01-31 10:27:58 +03:00
static const char * sort_key = " acquired " ;
static int ( * compare ) ( struct lock_stat * , struct lock_stat * ) ;
static struct rb_root result ; /* place to store sorted data */
2010-01-30 14:43:33 +03:00
# define DEF_KEY_LOCK(name, fn_suffix) \
{ # name , lock_stat_key_ # # fn_suffix }
struct lock_key keys [ ] = {
DEF_KEY_LOCK ( acquired , nr_acquired ) ,
DEF_KEY_LOCK ( contended , nr_contended ) ,
DEF_KEY_LOCK ( wait_total , wait_time_total ) ,
DEF_KEY_LOCK ( wait_min , wait_time_min ) ,
DEF_KEY_LOCK ( wait_max , wait_time_max ) ,
/* extra comparisons much complicated should be here */
{ NULL , NULL }
} ;
/* Resolve the user-supplied sort_key into a comparator; dies if unknown. */
static void select_key(void)
{
	int i;

	for (i = 0; keys[i].name; i++) {
		if (strcmp(keys[i].name, sort_key))
			continue;
		compare = keys[i].key;
		return;
	}

	die("Unknown compare key:%s\n", sort_key);
}
static void insert_to_result ( struct lock_stat * st ,
2010-01-31 10:27:58 +03:00
int ( * bigger ) ( struct lock_stat * , struct lock_stat * ) )
2010-01-30 14:43:33 +03:00
{
struct rb_node * * rb = & result . rb_node ;
struct rb_node * parent = NULL ;
struct lock_stat * p ;
while ( * rb ) {
p = container_of ( * rb , struct lock_stat , rb ) ;
parent = * rb ;
if ( bigger ( st , p ) )
rb = & ( * rb ) - > rb_left ;
else
rb = & ( * rb ) - > rb_right ;
}
rb_link_node ( & st - > rb , parent , rb ) ;
rb_insert_color ( & st - > rb , & result ) ;
}
/* returns left most element of result, and erase it */
static struct lock_stat * pop_from_result ( void )
{
struct rb_node * node = result . rb_node ;
if ( ! node )
return NULL ;
while ( node - > rb_left )
node = node - > rb_left ;
rb_erase ( node , & result ) ;
return container_of ( node , struct lock_stat , rb ) ;
}
2010-01-31 10:27:58 +03:00
static struct lock_stat * lock_stat_findnew ( void * addr , const char * name )
2010-01-30 14:43:33 +03:00
{
struct list_head * entry = lockhashentry ( addr ) ;
struct lock_stat * ret , * new ;
list_for_each_entry ( ret , entry , hash_entry ) {
if ( ret - > addr = = addr )
return ret ;
}
new = zalloc ( sizeof ( struct lock_stat ) ) ;
if ( ! new )
goto alloc_failed ;
new - > addr = addr ;
new - > name = zalloc ( sizeof ( char ) * strlen ( name ) + 1 ) ;
if ( ! new - > name )
goto alloc_failed ;
strcpy ( new - > name , name ) ;
new - > wait_time_min = ULLONG_MAX ;
list_add ( & new - > hash_entry , entry ) ;
return new ;
alloc_failed :
die ( " memory allocation failed \n " ) ;
}
static char const * input_name = " perf.data " ;
struct raw_event_sample {
2010-01-31 10:27:58 +03:00
u32 size ;
char data [ 0 ] ;
2010-01-30 14:43:33 +03:00
} ;
struct trace_acquire_event {
2010-01-31 10:27:58 +03:00
void * addr ;
const char * name ;
2010-04-21 16:23:54 +04:00
int flag ;
2010-01-30 14:43:33 +03:00
} ;
struct trace_acquired_event {
2010-01-31 10:27:58 +03:00
void * addr ;
const char * name ;
2010-01-30 14:43:33 +03:00
} ;
struct trace_contended_event {
2010-01-31 10:27:58 +03:00
void * addr ;
const char * name ;
2010-01-30 14:43:33 +03:00
} ;
struct trace_release_event {
2010-01-31 10:27:58 +03:00
void * addr ;
const char * name ;
2010-01-30 14:43:33 +03:00
} ;
/* Callbacks invoked for each decoded lock event; any slot may be NULL. */
struct trace_lock_handler {
	void (*acquire_event)(struct trace_acquire_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*acquired_event)(struct trace_acquired_event *,
			       struct event *,
			       int cpu,
			       u64 timestamp,
			       struct thread *thread);

	void (*contended_event)(struct trace_contended_event *,
				struct event *,
				int cpu,
				u64 timestamp,
				struct thread *thread);

	void (*release_event)(struct trace_release_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);
};
2010-04-21 16:23:54 +04:00
static struct lock_seq_stat * get_seq ( struct thread_stat * ts , void * addr )
{
struct lock_seq_stat * seq ;
list_for_each_entry ( seq , & ts - > seq_list , list ) {
if ( seq - > addr = = addr )
return seq ;
}
seq = zalloc ( sizeof ( struct lock_seq_stat ) ) ;
if ( ! seq )
die ( " Not enough memory \n " ) ;
seq - > state = SEQ_STATE_UNINITIALIZED ;
seq - > addr = addr ;
list_add ( & seq - > list , & ts - > seq_list ) ;
return seq ;
}
/* Which event type broke a lock sequence; indexes bad_hist[]. */
enum broken_state {
	BROKEN_ACQUIRE,
	BROKEN_ACQUIRED,
	BROKEN_CONTENDED,
	BROKEN_RELEASE,
	BROKEN_MAX,
};

static int bad_hist[BROKEN_MAX];

/* flag bits carried by the lock_acquire tracepoint */
enum acquire_flags {
	TRY_LOCK = 1,
	READ_LOCK = 2,
};
2010-01-31 10:27:58 +03:00
static void
report_lock_acquire_event ( struct trace_acquire_event * acquire_event ,
2010-01-30 14:43:33 +03:00
struct event * __event __used ,
int cpu __used ,
2010-04-21 16:23:54 +04:00
u64 timestamp __used ,
2010-01-30 14:43:33 +03:00
struct thread * thread __used )
{
2010-04-21 16:23:54 +04:00
struct lock_stat * ls ;
struct thread_stat * ts ;
struct lock_seq_stat * seq ;
ls = lock_stat_findnew ( acquire_event - > addr , acquire_event - > name ) ;
if ( ls - > discard )
return ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
ts = thread_stat_findnew ( thread - > pid ) ;
seq = get_seq ( ts , acquire_event - > addr ) ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
switch ( seq - > state ) {
case SEQ_STATE_UNINITIALIZED :
case SEQ_STATE_RELEASED :
if ( ! acquire_event - > flag ) {
seq - > state = SEQ_STATE_ACQUIRING ;
} else {
2010-05-06 01:57:25 +04:00
if ( acquire_event - > flag & TRY_LOCK )
2010-04-21 16:23:54 +04:00
ls - > nr_trylock + + ;
2010-05-06 01:57:25 +04:00
if ( acquire_event - > flag & READ_LOCK )
2010-04-21 16:23:54 +04:00
ls - > nr_readlock + + ;
seq - > state = SEQ_STATE_READ_ACQUIRED ;
seq - > read_count = 1 ;
ls - > nr_acquired + + ;
}
break ;
case SEQ_STATE_READ_ACQUIRED :
2010-05-06 01:57:25 +04:00
if ( acquire_event - > flag & READ_LOCK ) {
2010-04-21 16:23:54 +04:00
seq - > read_count + + ;
ls - > nr_acquired + + ;
goto end ;
} else {
goto broken ;
}
2010-01-30 14:43:33 +03:00
break ;
2010-04-21 16:23:54 +04:00
case SEQ_STATE_ACQUIRED :
case SEQ_STATE_ACQUIRING :
case SEQ_STATE_CONTENDED :
broken :
/* broken lock sequence, discard it */
ls - > discard = 1 ;
2010-05-06 01:47:28 +04:00
bad_hist [ BROKEN_ACQUIRE ] + + ;
2010-04-21 16:23:54 +04:00
list_del ( & seq - > list ) ;
free ( seq ) ;
goto end ;
2010-01-30 14:43:33 +03:00
break ;
default :
2010-04-21 16:23:54 +04:00
BUG_ON ( " Unknown state of lock sequence found! \n " ) ;
2010-01-30 14:43:33 +03:00
break ;
}
2010-04-21 16:23:54 +04:00
ls - > nr_acquire + + ;
seq - > prev_event_time = timestamp ;
end :
return ;
2010-01-30 14:43:33 +03:00
}
2010-01-31 10:27:58 +03:00
static void
report_lock_acquired_event ( struct trace_acquired_event * acquired_event ,
2010-01-30 14:43:33 +03:00
struct event * __event __used ,
int cpu __used ,
2010-04-21 16:23:54 +04:00
u64 timestamp __used ,
2010-01-30 14:43:33 +03:00
struct thread * thread __used )
{
2010-04-21 16:23:54 +04:00
struct lock_stat * ls ;
struct thread_stat * ts ;
struct lock_seq_stat * seq ;
u64 contended_term ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
ls = lock_stat_findnew ( acquired_event - > addr , acquired_event - > name ) ;
if ( ls - > discard )
return ;
ts = thread_stat_findnew ( thread - > pid ) ;
seq = get_seq ( ts , acquired_event - > addr ) ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
switch ( seq - > state ) {
case SEQ_STATE_UNINITIALIZED :
/* orphan event, do nothing */
return ;
case SEQ_STATE_ACQUIRING :
2010-01-30 14:43:33 +03:00
break ;
2010-04-21 16:23:54 +04:00
case SEQ_STATE_CONTENDED :
contended_term = timestamp - seq - > prev_event_time ;
ls - > wait_time_total + = contended_term ;
if ( contended_term < ls - > wait_time_min )
ls - > wait_time_min = contended_term ;
2010-05-07 04:33:42 +04:00
if ( ls - > wait_time_max < contended_term )
2010-04-21 16:23:54 +04:00
ls - > wait_time_max = contended_term ;
2010-01-30 14:43:33 +03:00
break ;
2010-04-21 16:23:54 +04:00
case SEQ_STATE_RELEASED :
case SEQ_STATE_ACQUIRED :
case SEQ_STATE_READ_ACQUIRED :
/* broken lock sequence, discard it */
ls - > discard = 1 ;
2010-05-06 01:47:28 +04:00
bad_hist [ BROKEN_ACQUIRED ] + + ;
2010-04-21 16:23:54 +04:00
list_del ( & seq - > list ) ;
free ( seq ) ;
goto end ;
break ;
2010-01-30 14:43:33 +03:00
default :
2010-04-21 16:23:54 +04:00
BUG_ON ( " Unknown state of lock sequence found! \n " ) ;
2010-01-30 14:43:33 +03:00
break ;
}
2010-04-21 16:23:54 +04:00
seq - > state = SEQ_STATE_ACQUIRED ;
ls - > nr_acquired + + ;
seq - > prev_event_time = timestamp ;
end :
return ;
2010-01-30 14:43:33 +03:00
}
2010-01-31 10:27:58 +03:00
static void
report_lock_contended_event ( struct trace_contended_event * contended_event ,
2010-01-30 14:43:33 +03:00
struct event * __event __used ,
int cpu __used ,
2010-04-21 16:23:54 +04:00
u64 timestamp __used ,
2010-01-30 14:43:33 +03:00
struct thread * thread __used )
{
2010-04-21 16:23:54 +04:00
struct lock_stat * ls ;
struct thread_stat * ts ;
struct lock_seq_stat * seq ;
ls = lock_stat_findnew ( contended_event - > addr , contended_event - > name ) ;
if ( ls - > discard )
return ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
ts = thread_stat_findnew ( thread - > pid ) ;
seq = get_seq ( ts , contended_event - > addr ) ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
switch ( seq - > state ) {
case SEQ_STATE_UNINITIALIZED :
/* orphan event, do nothing */
return ;
case SEQ_STATE_ACQUIRING :
2010-01-30 14:43:33 +03:00
break ;
2010-04-21 16:23:54 +04:00
case SEQ_STATE_RELEASED :
case SEQ_STATE_ACQUIRED :
case SEQ_STATE_READ_ACQUIRED :
case SEQ_STATE_CONTENDED :
/* broken lock sequence, discard it */
ls - > discard = 1 ;
2010-05-06 01:47:28 +04:00
bad_hist [ BROKEN_CONTENDED ] + + ;
2010-04-21 16:23:54 +04:00
list_del ( & seq - > list ) ;
free ( seq ) ;
goto end ;
2010-01-30 14:43:33 +03:00
break ;
default :
2010-04-21 16:23:54 +04:00
BUG_ON ( " Unknown state of lock sequence found! \n " ) ;
2010-01-30 14:43:33 +03:00
break ;
}
2010-04-21 16:23:54 +04:00
seq - > state = SEQ_STATE_CONTENDED ;
ls - > nr_contended + + ;
seq - > prev_event_time = timestamp ;
end :
return ;
2010-01-30 14:43:33 +03:00
}
2010-01-31 10:27:58 +03:00
static void
report_lock_release_event ( struct trace_release_event * release_event ,
2010-01-30 14:43:33 +03:00
struct event * __event __used ,
int cpu __used ,
2010-04-21 16:23:54 +04:00
u64 timestamp __used ,
2010-01-30 14:43:33 +03:00
struct thread * thread __used )
{
2010-04-21 16:23:54 +04:00
struct lock_stat * ls ;
struct thread_stat * ts ;
struct lock_seq_stat * seq ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
ls = lock_stat_findnew ( release_event - > addr , release_event - > name ) ;
if ( ls - > discard )
return ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
ts = thread_stat_findnew ( thread - > pid ) ;
seq = get_seq ( ts , release_event - > addr ) ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
switch ( seq - > state ) {
case SEQ_STATE_UNINITIALIZED :
goto end ;
break ;
case SEQ_STATE_ACQUIRED :
break ;
case SEQ_STATE_READ_ACQUIRED :
seq - > read_count - - ;
BUG_ON ( seq - > read_count < 0 ) ;
if ( ! seq - > read_count ) {
ls - > nr_release + + ;
2010-01-30 14:43:33 +03:00
goto end ;
}
2010-04-21 16:23:54 +04:00
break ;
case SEQ_STATE_ACQUIRING :
case SEQ_STATE_CONTENDED :
case SEQ_STATE_RELEASED :
/* broken lock sequence, discard it */
ls - > discard = 1 ;
2010-05-06 01:47:28 +04:00
bad_hist [ BROKEN_RELEASE ] + + ;
2010-04-21 16:23:54 +04:00
goto free_seq ;
2010-01-30 14:43:33 +03:00
break ;
default :
2010-04-21 16:23:54 +04:00
BUG_ON ( " Unknown state of lock sequence found! \n " ) ;
2010-01-30 14:43:33 +03:00
break ;
}
2010-04-21 16:23:54 +04:00
ls - > nr_release + + ;
free_seq :
list_del ( & seq - > list ) ;
free ( seq ) ;
2010-01-30 14:43:33 +03:00
end :
2010-04-21 16:23:54 +04:00
return ;
2010-01-30 14:43:33 +03:00
}
/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
2010-01-31 10:27:58 +03:00
static struct trace_lock_handler report_lock_ops = {
. acquire_event = report_lock_acquire_event ,
. acquired_event = report_lock_acquired_event ,
. contended_event = report_lock_contended_event ,
. release_event = report_lock_release_event ,
2010-01-30 14:43:33 +03:00
} ;
static struct trace_lock_handler * trace_handler ;
/* Decode a raw lock_acquire record and forward it to the handler. */
static void
process_lock_acquire_event(void *data,
			   struct event *event __used,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_acquire_event acquire_event;
	u64 tmp;		/* this is required for casting... */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&acquire_event.addr, &tmp, sizeof(void *));
	acquire_event.name = (char *)raw_field_ptr(event, "name", data);
	acquire_event.flag = (int)raw_field_value(event, "flag", data);

	if (trace_handler->acquire_event)
		trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
}
/* Decode a raw lock_acquired record and forward it to the handler. */
static void
process_lock_acquired_event(void *data,
			    struct event *event __used,
			    int cpu __used,
			    u64 timestamp __used,
			    struct thread *thread __used)
{
	struct trace_acquired_event acquired_event;
	u64 tmp;		/* this is required for casting... */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&acquired_event.addr, &tmp, sizeof(void *));
	acquired_event.name = (char *)raw_field_ptr(event, "name", data);

	/* bug fix: test the member we call (was ->acquire_event) */
	if (trace_handler->acquired_event)
		trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
}
/* Decode a raw lock_contended record and forward it to the handler. */
static void
process_lock_contended_event(void *data,
			     struct event *event __used,
			     int cpu __used,
			     u64 timestamp __used,
			     struct thread *thread __used)
{
	struct trace_contended_event contended_event;
	u64 tmp;		/* this is required for casting... */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&contended_event.addr, &tmp, sizeof(void *));
	contended_event.name = (char *)raw_field_ptr(event, "name", data);

	/* bug fix: test the member we call (was ->acquire_event) */
	if (trace_handler->contended_event)
		trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
}
/* Decode a raw lock_release record and forward it to the handler. */
static void
process_lock_release_event(void *data,
			   struct event *event __used,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_release_event release_event;
	u64 tmp;		/* this is required for casting... */

	tmp = raw_field_value(event, "lockdep_addr", data);
	memcpy(&release_event.addr, &tmp, sizeof(void *));
	release_event.name = (char *)raw_field_ptr(event, "name", data);

	/* bug fix: test the member we call (was ->acquire_event) */
	if (trace_handler->release_event)
		trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
}
static void
2010-04-24 02:04:12 +04:00
process_raw_event ( void * data , int cpu , u64 timestamp , struct thread * thread )
2010-01-30 14:43:33 +03:00
{
struct event * event ;
int type ;
type = trace_parse_common_type ( data ) ;
event = trace_find_event ( type ) ;
if ( ! strcmp ( event - > name , " lock_acquire " ) )
process_lock_acquire_event ( data , event , cpu , timestamp , thread ) ;
if ( ! strcmp ( event - > name , " lock_acquired " ) )
process_lock_acquired_event ( data , event , cpu , timestamp , thread ) ;
if ( ! strcmp ( event - > name , " lock_contended " ) )
process_lock_contended_event ( data , event , cpu , timestamp , thread ) ;
if ( ! strcmp ( event - > name , " lock_release " ) )
process_lock_release_event ( data , event , cpu , timestamp , thread ) ;
}
2010-05-06 01:47:28 +04:00
static void print_bad_events ( int bad , int total )
{
/* Output for debug, this have to be removed */
int i ;
const char * name [ 4 ] =
{ " acquire " , " acquired " , " contended " , " release " } ;
pr_info ( " \n === output for debug=== \n \n " ) ;
2010-05-06 06:55:22 +04:00
pr_info ( " bad: %d, total: %d \n " , bad , total ) ;
pr_info ( " bad rate: %f %% \n " , ( double ) bad / ( double ) total * 100 ) ;
2010-05-06 01:47:28 +04:00
pr_info ( " histogram of events caused bad sequence \n " ) ;
for ( i = 0 ; i < BROKEN_MAX ; i + + )
pr_info ( " %10s: %d \n " , name [ i ] , bad_hist [ i ] ) ;
}
2010-01-30 14:43:33 +03:00
/* TODO: various way to print, coloring, nano or milli sec */
static void print_result ( void )
{
struct lock_stat * st ;
char cut_name [ 20 ] ;
2010-04-21 16:23:54 +04:00
int bad , total ;
2010-01-30 14:43:33 +03:00
2010-05-03 09:12:00 +04:00
pr_info ( " %20s " , " Name " ) ;
pr_info ( " %10s " , " acquired " ) ;
pr_info ( " %10s " , " contended " ) ;
2010-01-30 14:43:33 +03:00
2010-05-03 09:12:00 +04:00
pr_info ( " %15s " , " total wait (ns) " ) ;
pr_info ( " %15s " , " max wait (ns) " ) ;
pr_info ( " %15s " , " min wait (ns) " ) ;
2010-01-30 14:43:33 +03:00
2010-05-03 09:12:00 +04:00
pr_info ( " \n \n " ) ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
bad = total = 0 ;
2010-01-30 14:43:33 +03:00
while ( ( st = pop_from_result ( ) ) ) {
2010-04-21 16:23:54 +04:00
total + + ;
if ( st - > discard ) {
bad + + ;
continue ;
}
2010-01-30 14:43:33 +03:00
bzero ( cut_name , 20 ) ;
if ( strlen ( st - > name ) < 16 ) {
/* output raw name */
2010-05-03 09:12:00 +04:00
pr_info ( " %20s " , st - > name ) ;
2010-01-30 14:43:33 +03:00
} else {
strncpy ( cut_name , st - > name , 16 ) ;
cut_name [ 16 ] = ' . ' ;
cut_name [ 17 ] = ' . ' ;
cut_name [ 18 ] = ' . ' ;
cut_name [ 19 ] = ' \0 ' ;
/* cut off name for saving output style */
2010-05-03 09:12:00 +04:00
pr_info ( " %20s " , cut_name ) ;
2010-01-30 14:43:33 +03:00
}
2010-05-03 09:12:00 +04:00
pr_info ( " %10u " , st - > nr_acquired ) ;
pr_info ( " %10u " , st - > nr_contended ) ;
2010-01-30 14:43:33 +03:00
2010-05-03 09:12:00 +04:00
pr_info ( " %15llu " , st - > wait_time_total ) ;
pr_info ( " %15llu " , st - > wait_time_max ) ;
pr_info ( " %15llu " , st - > wait_time_min = = ULLONG_MAX ?
2010-01-30 14:43:33 +03:00
0 : st - > wait_time_min ) ;
2010-05-03 09:12:00 +04:00
pr_info ( " \n " ) ;
2010-01-30 14:43:33 +03:00
}
2010-04-21 16:23:54 +04:00
2010-05-06 01:47:28 +04:00
print_bad_events ( bad , total ) ;
2010-01-30 14:43:33 +03:00
}
2010-05-03 09:12:00 +04:00
static int info_threads ;
static int info_map ;
static void dump_threads ( void )
{
struct thread_stat * st ;
struct rb_node * node ;
struct thread * t ;
pr_info ( " %10s: comm \n " , " Thread ID " ) ;
node = rb_first ( & thread_stats ) ;
while ( node ) {
st = container_of ( node , struct thread_stat , rb ) ;
t = perf_session__findnew ( session , st - > tid ) ;
pr_info ( " %10d: %s \n " , st - > tid , t - > comm ) ;
node = rb_next ( node ) ;
} ;
}
2010-01-30 14:43:33 +03:00
static void dump_map ( void )
{
unsigned int i ;
struct lock_stat * st ;
2010-05-03 09:12:00 +04:00
pr_info ( " Address of instance: name of class \n " ) ;
2010-01-30 14:43:33 +03:00
for ( i = 0 ; i < LOCKHASH_SIZE ; i + + ) {
list_for_each_entry ( st , & lockhash_table [ i ] , hash_entry ) {
2010-05-03 09:12:00 +04:00
pr_info ( " %p: %s \n " , st - > addr , st - > name ) ;
2010-01-30 14:43:33 +03:00
}
}
}
2010-05-03 09:12:00 +04:00
static void dump_info ( void )
{
if ( info_threads )
dump_threads ( ) ;
else if ( info_map )
dump_map ( ) ;
else
die ( " Unknown type of information \n " ) ;
}
2010-04-24 02:04:12 +04:00
static int process_sample_event ( event_t * self , struct perf_session * s )
{
struct sample_data data ;
struct thread * thread ;
bzero ( & data , sizeof ( data ) ) ;
event__parse_sample ( self , s - > sample_type , & data ) ;
thread = perf_session__findnew ( s , data . tid ) ;
if ( thread = = NULL ) {
pr_debug ( " problem processing %d event, skipping it. \n " ,
self - > header . type ) ;
return - 1 ;
}
process_raw_event ( data . raw_data , data . cpu , data . time , thread ) ;
return 0 ;
}
2010-01-30 14:43:33 +03:00
static struct perf_event_ops eops = {
2010-01-31 10:27:58 +03:00
. sample = process_sample_event ,
. comm = event__process_comm ,
2010-04-24 02:04:12 +04:00
. ordered_samples = true ,
2010-01-30 14:43:33 +03:00
} ;
static int read_events ( void )
{
2010-05-01 10:41:20 +04:00
session = perf_session__new ( input_name , O_RDONLY , 0 , false ) ;
2010-01-30 14:43:33 +03:00
if ( ! session )
die ( " Initializing perf session failed \n " ) ;
return perf_session__process_events ( session , & eops ) ;
}
static void sort_result ( void )
{
unsigned int i ;
struct lock_stat * st ;
for ( i = 0 ; i < LOCKHASH_SIZE ; i + + ) {
list_for_each_entry ( st , & lockhash_table [ i ] , hash_entry ) {
insert_to_result ( st , compare ) ;
}
}
}
/* "perf lock report": read, sort by the chosen key, and print. */
static void __cmd_report(void)
{
	setup_pager();
	select_key();
	read_events();
	sort_result();
	print_result();
}
2010-01-31 10:27:58 +03:00
static const char * const report_usage [ ] = {
" perf lock report [<options>] " ,
2010-01-30 14:43:33 +03:00
NULL
} ;
2010-01-31 10:27:58 +03:00
static const struct option report_options [ ] = {
2010-01-30 14:43:33 +03:00
OPT_STRING ( ' k ' , " key " , & sort_key , " acquired " ,
" key for sorting " ) ,
/* TODO: type */
OPT_END ( )
} ;
2010-05-03 09:12:00 +04:00
static const char * const info_usage [ ] = {
" perf lock info [<options>] " ,
NULL
} ;
static const struct option info_options [ ] = {
OPT_BOOLEAN ( ' t ' , " threads " , & info_threads ,
" dump thread list in perf.data " ) ,
OPT_BOOLEAN ( ' m ' , " map " , & info_map ,
" map of lock instances (name:address table) " ) ,
OPT_END ( )
} ;
2010-01-30 14:43:33 +03:00
static const char * const lock_usage [ ] = {
2010-01-31 10:27:58 +03:00
" perf lock [<options>] {record|trace|report} " ,
2010-01-30 14:43:33 +03:00
NULL
} ;
static const struct option lock_options [ ] = {
2010-01-31 10:27:58 +03:00
OPT_STRING ( ' i ' , " input " , & input_name , " file " , " input file name " ) ,
2010-04-13 12:37:33 +04:00
OPT_INCR ( ' v ' , " verbose " , & verbose , " be more verbose (show symbol address, etc) " ) ,
2010-01-31 10:27:58 +03:00
OPT_BOOLEAN ( ' D ' , " dump-raw-trace " , & dump_trace , " dump raw trace in ASCII " ) ,
2010-01-30 14:43:33 +03:00
OPT_END ( )
} ;
static const char * record_args [ ] = {
" record " ,
" -a " ,
" -R " ,
" -f " ,
" -m " , " 1024 " ,
" -c " , " 1 " ,
" -e " , " lock:lock_acquire:r " ,
" -e " , " lock:lock_acquired:r " ,
" -e " , " lock:lock_contended:r " ,
" -e " , " lock:lock_release:r " ,
} ;
/*
 * "perf lock record": build record_args + the user's trailing args into
 * a fresh argv and hand it to cmd_record(). rec_argv ownership passes
 * to cmd_record.
 */
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	/* bug fix: calloc result was dereferenced without a check */
	if (!rec_argv)
		die("memory allocation failed\n");

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}
/* Entry point for "perf lock": dispatch to record/report/trace/info. */
int cmd_lock(int argc, const char **argv, const char *prefix __used)
{
	unsigned int i;

	symbol__init();
	for (i = 0; i < LOCKHASH_SIZE; i++)
		INIT_LIST_HEAD(lockhash_table + i);

	argc = parse_options(argc, argv, lock_options, lock_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(lock_usage, lock_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "report", 6)) {
		trace_handler = &report_lock_ops;
		if (argc) {
			argc = parse_options(argc, argv,
					     report_options, report_usage, 0);
			if (argc)
				usage_with_options(report_usage, report_options);
		}
		__cmd_report();
	} else if (!strcmp(argv[0], "trace")) {
		/* Aliased to 'perf trace' */
		return cmd_trace(argc, argv, prefix);
	} else if (!strcmp(argv[0], "info")) {
		if (argc) {
			argc = parse_options(argc, argv,
					     info_options, info_usage, 0);
			if (argc)
				usage_with_options(info_usage, info_options);
		}
		/* recycling report_lock_ops */
		trace_handler = &report_lock_ops;
		setup_pager();
		read_events();
		dump_info();
	} else {
		usage_with_options(lock_usage, lock_options);
	}

	return 0;
}