2010-01-30 14:43:33 +03:00
# include "builtin.h"
# include "perf.h"
2012-09-24 17:52:12 +04:00
# include "util/evlist.h"
2012-08-07 16:58:03 +04:00
# include "util/evsel.h"
2010-01-30 14:43:33 +03:00
# include "util/util.h"
# include "util/cache.h"
# include "util/symbol.h"
# include "util/thread.h"
# include "util/header.h"
# include "util/parse-options.h"
# include "util/trace-event.h"
# include "util/debug.h"
# include "util/session.h"
2011-11-28 14:30:20 +04:00
# include "util/tool.h"
2013-10-15 18:27:32 +04:00
# include "util/data.h"
2010-01-30 14:43:33 +03:00
# include <sys/types.h>
# include <sys/prctl.h>
# include <semaphore.h>
# include <pthread.h>
# include <math.h>
# include <limits.h>
# include <linux/list.h>
# include <linux/hash.h>
2010-04-21 16:23:54 +04:00
static struct perf_session * session ;
2010-01-30 14:43:33 +03:00
/* based on kernel/lockdep.c */
# define LOCKHASH_BITS 12
# define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
static struct list_head lockhash_table [ LOCKHASH_SIZE ] ;
# define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
# define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
struct lock_stat {
2010-01-31 10:27:58 +03:00
struct list_head hash_entry ;
struct rb_node rb ; /* used for sorting */
2010-01-30 14:43:33 +03:00
2010-01-31 10:27:58 +03:00
/*
2012-09-24 17:52:12 +04:00
* FIXME : perf_evsel__intval ( ) returns u64 ,
2010-01-30 14:43:33 +03:00
* so address of lockdep_map should be dealed as 64 bit .
2010-01-31 10:27:58 +03:00
* Is there more better solution ?
*/
void * addr ; /* address of lockdep_map, used as ID */
char * name ; /* for strcpy(), we cannot use const */
2010-01-30 14:43:33 +03:00
2010-01-31 10:27:58 +03:00
unsigned int nr_acquire ;
2010-04-21 16:23:54 +04:00
unsigned int nr_acquired ;
2010-01-31 10:27:58 +03:00
unsigned int nr_contended ;
unsigned int nr_release ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
unsigned int nr_readlock ;
unsigned int nr_trylock ;
2013-09-09 06:19:19 +04:00
2010-01-30 14:43:33 +03:00
/* these times are in nano sec. */
2013-09-09 06:19:19 +04:00
u64 avg_wait_time ;
2010-01-31 10:27:58 +03:00
u64 wait_time_total ;
u64 wait_time_min ;
u64 wait_time_max ;
2010-04-21 16:23:54 +04:00
int discard ; /* flag of blacklist */
} ;
/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting first event of acquire.
 * As the nature of lock events, there is no guarantee
 * that the first event for the locks are acquire,
 * it can be acquired, contended or release.
 */
#define SEQ_STATE_UNINITIALIZED	0 /* initial state */
#define SEQ_STATE_RELEASED	1
#define SEQ_STATE_ACQUIRING	2
#define SEQ_STATE_ACQUIRED	3
#define SEQ_STATE_READ_ACQUIRED	4
#define SEQ_STATE_CONTENDED	5

/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be synchronized?
 */
#define MAX_LOCK_DEPTH 48

/*
 * struct lock_seq_stat:
 * Place to put on state of one lock sequence
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
struct lock_seq_stat {
	struct list_head	list;	/* linked on thread_stat->seq_list */
	int			state;	/* one of SEQ_STATE_* above */
	u64			prev_event_time;
	void			*addr;	/* lock instance this sequence tracks */

	int			read_count;	/* nesting depth of read locks */
};
2010-04-21 16:23:54 +04:00
/* per-thread bookkeeping: every in-flight lock sequence owned by a tid */
struct thread_stat {
	struct rb_node		rb;		/* node in thread_stats, keyed by tid */
	u32			tid;
	struct list_head	seq_list;	/* list of struct lock_seq_stat */
};

static struct rb_root		thread_stats;
/* Binary search for @tid in the thread_stats rb-tree; NULL when absent. */
static struct thread_stat *thread_stat_find(u32 tid)
{
	struct rb_node *node = thread_stats.rb_node;

	while (node) {
		struct thread_stat *st =
			container_of(node, struct thread_stat, rb);

		if (st->tid == tid)
			return st;

		node = (tid < st->tid) ? node->rb_left : node->rb_right;
	}

	return NULL;
}
static void thread_stat_insert ( struct thread_stat * new )
{
struct rb_node * * rb = & thread_stats . rb_node ;
struct rb_node * parent = NULL ;
struct thread_stat * p ;
while ( * rb ) {
p = container_of ( * rb , struct thread_stat , rb ) ;
parent = * rb ;
if ( new - > tid < p - > tid )
rb = & ( * rb ) - > rb_left ;
else if ( new - > tid > p - > tid )
rb = & ( * rb ) - > rb_right ;
else
BUG_ON ( " inserting invalid thread_stat \n " ) ;
}
rb_link_node ( & new - > rb , parent , rb ) ;
rb_insert_color ( & new - > rb , & thread_stats ) ;
}
/*
 * Steady-state find-or-create: search first, allocate only on a miss.
 * Returns NULL (after logging) if allocation fails.
 */
static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
{
	struct thread_stat *st = thread_stat_find(tid);

	if (st)
		return st;

	st = zalloc(sizeof(struct thread_stat));
	if (!st) {
		pr_err("memory allocation failed\n");
		return NULL;
	}

	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	thread_stat_insert(st);

	return st;
}
static struct thread_stat *thread_stat_findnew_first(u32 tid);

/*
 * Dispatch pointer: starts at the "first call" variant that skips the
 * (empty) tree lookup, then self-replaces with the searching variant.
 */
static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
	thread_stat_findnew_first;

static struct thread_stat *thread_stat_findnew_first(u32 tid)
{
	struct thread_stat *st = zalloc(sizeof(struct thread_stat));

	if (!st) {
		pr_err("memory allocation failed\n");
		return NULL;
	}

	st->tid = tid;
	INIT_LIST_HEAD(&st->seq_list);

	/* tree is known-empty: link directly at the root */
	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
	rb_insert_color(&st->rb, &thread_stats);

	/* from now on every lookup must search the tree */
	thread_stat_findnew = thread_stat_findnew_after_first;

	return st;
}
2010-01-30 14:43:33 +03:00
/* build simple key function one is bigger than two */
2010-01-31 10:27:58 +03:00
# define SINGLE_KEY(member) \
2010-01-30 14:43:33 +03:00
static int lock_stat_key_ # # member ( struct lock_stat * one , \
struct lock_stat * two ) \
{ \
return one - > member > two - > member ; \
}
SINGLE_KEY ( nr_acquired )
SINGLE_KEY ( nr_contended )
2013-09-09 06:19:19 +04:00
SINGLE_KEY ( avg_wait_time )
2010-01-30 14:43:33 +03:00
SINGLE_KEY ( wait_time_total )
SINGLE_KEY ( wait_time_max )
2011-02-22 20:47:15 +03:00
static int lock_stat_key_wait_time_min ( struct lock_stat * one ,
struct lock_stat * two )
{
u64 s1 = one - > wait_time_min ;
u64 s2 = two - > wait_time_min ;
if ( s1 = = ULLONG_MAX )
s1 = 0 ;
if ( s2 = = ULLONG_MAX )
s2 = 0 ;
return s1 > s2 ;
}
2010-01-30 14:43:33 +03:00
struct lock_key {
/*
* name : the value for specify by user
* this should be simpler than raw name of member
* e . g . nr_acquired - > acquired , wait_time_total - > wait_total
*/
2010-01-31 10:27:58 +03:00
const char * name ;
int ( * key ) ( struct lock_stat * , struct lock_stat * ) ;
2010-01-30 14:43:33 +03:00
} ;
2010-01-31 10:27:58 +03:00
static const char * sort_key = " acquired " ;
static int ( * compare ) ( struct lock_stat * , struct lock_stat * ) ;
static struct rb_root result ; /* place to store sorted data */
2010-01-30 14:43:33 +03:00
# define DEF_KEY_LOCK(name, fn_suffix) \
{ # name , lock_stat_key_ # # fn_suffix }
struct lock_key keys [ ] = {
DEF_KEY_LOCK ( acquired , nr_acquired ) ,
DEF_KEY_LOCK ( contended , nr_contended ) ,
2013-09-09 06:19:19 +04:00
DEF_KEY_LOCK ( avg_wait , avg_wait_time ) ,
2010-01-30 14:43:33 +03:00
DEF_KEY_LOCK ( wait_total , wait_time_total ) ,
DEF_KEY_LOCK ( wait_min , wait_time_min ) ,
DEF_KEY_LOCK ( wait_max , wait_time_max ) ,
/* extra comparisons much complicated should be here */
{ NULL , NULL }
} ;
2012-08-26 22:24:43 +04:00
static int select_key ( void )
2010-01-30 14:43:33 +03:00
{
int i ;
for ( i = 0 ; keys [ i ] . name ; i + + ) {
if ( ! strcmp ( keys [ i ] . name , sort_key ) ) {
compare = keys [ i ] . key ;
2012-08-26 22:24:43 +04:00
return 0 ;
2010-01-30 14:43:33 +03:00
}
}
2012-08-26 22:24:43 +04:00
pr_err ( " Unknown compare key: %s \n " , sort_key ) ;
return - 1 ;
2010-01-30 14:43:33 +03:00
}
static void insert_to_result ( struct lock_stat * st ,
2010-01-31 10:27:58 +03:00
int ( * bigger ) ( struct lock_stat * , struct lock_stat * ) )
2010-01-30 14:43:33 +03:00
{
struct rb_node * * rb = & result . rb_node ;
struct rb_node * parent = NULL ;
struct lock_stat * p ;
while ( * rb ) {
p = container_of ( * rb , struct lock_stat , rb ) ;
parent = * rb ;
if ( bigger ( st , p ) )
rb = & ( * rb ) - > rb_left ;
else
rb = & ( * rb ) - > rb_right ;
}
rb_link_node ( & st - > rb , parent , rb ) ;
rb_insert_color ( & st - > rb , & result ) ;
}
/* returns left most element of result, and erase it */
static struct lock_stat * pop_from_result ( void )
{
struct rb_node * node = result . rb_node ;
if ( ! node )
return NULL ;
while ( node - > rb_left )
node = node - > rb_left ;
rb_erase ( node , & result ) ;
return container_of ( node , struct lock_stat , rb ) ;
}
2010-01-31 10:27:58 +03:00
static struct lock_stat * lock_stat_findnew ( void * addr , const char * name )
2010-01-30 14:43:33 +03:00
{
struct list_head * entry = lockhashentry ( addr ) ;
struct lock_stat * ret , * new ;
list_for_each_entry ( ret , entry , hash_entry ) {
if ( ret - > addr = = addr )
return ret ;
}
new = zalloc ( sizeof ( struct lock_stat ) ) ;
if ( ! new )
goto alloc_failed ;
new - > addr = addr ;
new - > name = zalloc ( sizeof ( char ) * strlen ( name ) + 1 ) ;
2013-09-09 06:19:15 +04:00
if ( ! new - > name ) {
free ( new ) ;
2010-01-30 14:43:33 +03:00
goto alloc_failed ;
2013-09-09 06:19:15 +04:00
}
2010-01-30 14:43:33 +03:00
2013-09-09 06:19:15 +04:00
strcpy ( new - > name , name ) ;
2010-01-30 14:43:33 +03:00
new - > wait_time_min = ULLONG_MAX ;
list_add ( & new - > hash_entry , entry ) ;
return new ;
alloc_failed :
2012-08-26 22:24:43 +04:00
pr_err ( " memory allocation failed \n " ) ;
return NULL ;
2010-01-30 14:43:33 +03:00
}
/* one callback per lock tracepoint; a set of these defines an analysis mode */
struct trace_lock_handler {
	int (*acquire_event)(struct perf_evsel *evsel,
			     struct perf_sample *sample);

	int (*acquired_event)(struct perf_evsel *evsel,
			      struct perf_sample *sample);

	int (*contended_event)(struct perf_evsel *evsel,
			       struct perf_sample *sample);

	int (*release_event)(struct perf_evsel *evsel,
			     struct perf_sample *sample);
};
2010-04-21 16:23:54 +04:00
static struct lock_seq_stat * get_seq ( struct thread_stat * ts , void * addr )
{
struct lock_seq_stat * seq ;
list_for_each_entry ( seq , & ts - > seq_list , list ) {
if ( seq - > addr = = addr )
return seq ;
}
seq = zalloc ( sizeof ( struct lock_seq_stat ) ) ;
2012-08-26 22:24:43 +04:00
if ( ! seq ) {
pr_err ( " memory allocation failed \n " ) ;
return NULL ;
}
2010-04-21 16:23:54 +04:00
seq - > state = SEQ_STATE_UNINITIALIZED ;
seq - > addr = addr ;
list_add ( & seq - > list , & ts - > seq_list ) ;
return seq ;
}
2010-05-06 01:47:28 +04:00
/* which event type broke a lock sequence (for the debug histogram) */
enum broken_state {
	BROKEN_ACQUIRE,
	BROKEN_ACQUIRED,
	BROKEN_CONTENDED,
	BROKEN_RELEASE,
	BROKEN_MAX,
};

static int bad_hist[BROKEN_MAX];

/* flags carried by the lock_acquire tracepoint */
enum acquire_flags {
	TRY_LOCK = 1,
	READ_LOCK = 2,
};
2012-09-24 17:52:12 +04:00
static int report_lock_acquire_event ( struct perf_evsel * evsel ,
struct perf_sample * sample )
2010-01-30 14:43:33 +03:00
{
2012-09-24 17:52:12 +04:00
void * addr ;
2010-04-21 16:23:54 +04:00
struct lock_stat * ls ;
struct thread_stat * ts ;
struct lock_seq_stat * seq ;
2012-09-24 17:52:12 +04:00
const char * name = perf_evsel__strval ( evsel , sample , " name " ) ;
u64 tmp = perf_evsel__intval ( evsel , sample , " lockdep_addr " ) ;
int flag = perf_evsel__intval ( evsel , sample , " flag " ) ;
2010-04-21 16:23:54 +04:00
2012-09-24 17:52:12 +04:00
memcpy ( & addr , & tmp , sizeof ( void * ) ) ;
ls = lock_stat_findnew ( addr , name ) ;
2012-08-26 22:24:43 +04:00
if ( ! ls )
2013-09-09 06:19:14 +04:00
return - ENOMEM ;
2010-04-21 16:23:54 +04:00
if ( ls - > discard )
2012-08-26 22:24:43 +04:00
return 0 ;
2010-01-30 14:43:33 +03:00
2012-08-07 17:59:44 +04:00
ts = thread_stat_findnew ( sample - > tid ) ;
2012-08-26 22:24:43 +04:00
if ( ! ts )
2013-09-09 06:19:14 +04:00
return - ENOMEM ;
2012-08-26 22:24:43 +04:00
2012-09-24 17:52:12 +04:00
seq = get_seq ( ts , addr ) ;
2012-08-26 22:24:43 +04:00
if ( ! seq )
2013-09-09 06:19:14 +04:00
return - ENOMEM ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
switch ( seq - > state ) {
case SEQ_STATE_UNINITIALIZED :
case SEQ_STATE_RELEASED :
2012-09-24 17:52:12 +04:00
if ( ! flag ) {
2010-04-21 16:23:54 +04:00
seq - > state = SEQ_STATE_ACQUIRING ;
} else {
2012-09-24 17:52:12 +04:00
if ( flag & TRY_LOCK )
2010-04-21 16:23:54 +04:00
ls - > nr_trylock + + ;
2012-09-24 17:52:12 +04:00
if ( flag & READ_LOCK )
2010-04-21 16:23:54 +04:00
ls - > nr_readlock + + ;
seq - > state = SEQ_STATE_READ_ACQUIRED ;
seq - > read_count = 1 ;
ls - > nr_acquired + + ;
}
break ;
case SEQ_STATE_READ_ACQUIRED :
2012-09-24 17:52:12 +04:00
if ( flag & READ_LOCK ) {
2010-04-21 16:23:54 +04:00
seq - > read_count + + ;
ls - > nr_acquired + + ;
goto end ;
} else {
goto broken ;
}
2010-01-30 14:43:33 +03:00
break ;
2010-04-21 16:23:54 +04:00
case SEQ_STATE_ACQUIRED :
case SEQ_STATE_ACQUIRING :
case SEQ_STATE_CONTENDED :
broken :
/* broken lock sequence, discard it */
ls - > discard = 1 ;
2010-05-06 01:47:28 +04:00
bad_hist [ BROKEN_ACQUIRE ] + + ;
2010-04-21 16:23:54 +04:00
list_del ( & seq - > list ) ;
free ( seq ) ;
goto end ;
2010-01-30 14:43:33 +03:00
default :
2010-04-21 16:23:54 +04:00
BUG_ON ( " Unknown state of lock sequence found! \n " ) ;
2010-01-30 14:43:33 +03:00
break ;
}
2010-04-21 16:23:54 +04:00
ls - > nr_acquire + + ;
2012-08-07 17:59:44 +04:00
seq - > prev_event_time = sample - > time ;
2010-04-21 16:23:54 +04:00
end :
2012-08-26 22:24:43 +04:00
return 0 ;
2010-01-30 14:43:33 +03:00
}
2012-09-24 17:52:12 +04:00
static int report_lock_acquired_event ( struct perf_evsel * evsel ,
struct perf_sample * sample )
2010-01-30 14:43:33 +03:00
{
2012-09-24 17:52:12 +04:00
void * addr ;
2010-04-21 16:23:54 +04:00
struct lock_stat * ls ;
struct thread_stat * ts ;
struct lock_seq_stat * seq ;
u64 contended_term ;
2012-09-24 17:52:12 +04:00
const char * name = perf_evsel__strval ( evsel , sample , " name " ) ;
u64 tmp = perf_evsel__intval ( evsel , sample , " lockdep_addr " ) ;
memcpy ( & addr , & tmp , sizeof ( void * ) ) ;
2010-01-30 14:43:33 +03:00
2012-09-24 17:52:12 +04:00
ls = lock_stat_findnew ( addr , name ) ;
2012-08-26 22:24:43 +04:00
if ( ! ls )
2013-09-09 06:19:14 +04:00
return - ENOMEM ;
2010-04-21 16:23:54 +04:00
if ( ls - > discard )
2012-08-26 22:24:43 +04:00
return 0 ;
2010-04-21 16:23:54 +04:00
2012-08-07 17:59:44 +04:00
ts = thread_stat_findnew ( sample - > tid ) ;
2012-08-26 22:24:43 +04:00
if ( ! ts )
2013-09-09 06:19:14 +04:00
return - ENOMEM ;
2012-08-26 22:24:43 +04:00
2012-09-24 17:52:12 +04:00
seq = get_seq ( ts , addr ) ;
2012-08-26 22:24:43 +04:00
if ( ! seq )
2013-09-09 06:19:14 +04:00
return - ENOMEM ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
switch ( seq - > state ) {
case SEQ_STATE_UNINITIALIZED :
/* orphan event, do nothing */
2012-08-26 22:24:43 +04:00
return 0 ;
2010-04-21 16:23:54 +04:00
case SEQ_STATE_ACQUIRING :
2010-01-30 14:43:33 +03:00
break ;
2010-04-21 16:23:54 +04:00
case SEQ_STATE_CONTENDED :
2012-09-24 17:52:12 +04:00
contended_term = sample - > time - seq - > prev_event_time ;
2010-04-21 16:23:54 +04:00
ls - > wait_time_total + = contended_term ;
if ( contended_term < ls - > wait_time_min )
ls - > wait_time_min = contended_term ;
2010-05-07 04:33:42 +04:00
if ( ls - > wait_time_max < contended_term )
2010-04-21 16:23:54 +04:00
ls - > wait_time_max = contended_term ;
2010-01-30 14:43:33 +03:00
break ;
2010-04-21 16:23:54 +04:00
case SEQ_STATE_RELEASED :
case SEQ_STATE_ACQUIRED :
case SEQ_STATE_READ_ACQUIRED :
/* broken lock sequence, discard it */
ls - > discard = 1 ;
2010-05-06 01:47:28 +04:00
bad_hist [ BROKEN_ACQUIRED ] + + ;
2010-04-21 16:23:54 +04:00
list_del ( & seq - > list ) ;
free ( seq ) ;
goto end ;
2010-01-30 14:43:33 +03:00
default :
2010-04-21 16:23:54 +04:00
BUG_ON ( " Unknown state of lock sequence found! \n " ) ;
2010-01-30 14:43:33 +03:00
break ;
}
2010-04-21 16:23:54 +04:00
seq - > state = SEQ_STATE_ACQUIRED ;
ls - > nr_acquired + + ;
2013-09-09 06:19:19 +04:00
ls - > avg_wait_time = ls - > nr_contended ? ls - > wait_time_total / ls - > nr_contended : 0 ;
2012-09-24 17:52:12 +04:00
seq - > prev_event_time = sample - > time ;
2010-04-21 16:23:54 +04:00
end :
2012-08-26 22:24:43 +04:00
return 0 ;
2010-01-30 14:43:33 +03:00
}
2012-09-24 17:52:12 +04:00
static int report_lock_contended_event ( struct perf_evsel * evsel ,
struct perf_sample * sample )
2010-01-30 14:43:33 +03:00
{
2012-09-24 17:52:12 +04:00
void * addr ;
2010-04-21 16:23:54 +04:00
struct lock_stat * ls ;
struct thread_stat * ts ;
struct lock_seq_stat * seq ;
2012-09-24 17:52:12 +04:00
const char * name = perf_evsel__strval ( evsel , sample , " name " ) ;
u64 tmp = perf_evsel__intval ( evsel , sample , " lockdep_addr " ) ;
memcpy ( & addr , & tmp , sizeof ( void * ) ) ;
2010-04-21 16:23:54 +04:00
2012-09-24 17:52:12 +04:00
ls = lock_stat_findnew ( addr , name ) ;
2012-08-26 22:24:43 +04:00
if ( ! ls )
2013-09-09 06:19:14 +04:00
return - ENOMEM ;
2010-04-21 16:23:54 +04:00
if ( ls - > discard )
2012-08-26 22:24:43 +04:00
return 0 ;
2010-01-30 14:43:33 +03:00
2012-08-07 17:59:44 +04:00
ts = thread_stat_findnew ( sample - > tid ) ;
2012-08-26 22:24:43 +04:00
if ( ! ts )
2013-09-09 06:19:14 +04:00
return - ENOMEM ;
2012-08-26 22:24:43 +04:00
2012-09-24 17:52:12 +04:00
seq = get_seq ( ts , addr ) ;
2012-08-26 22:24:43 +04:00
if ( ! seq )
2013-09-09 06:19:14 +04:00
return - ENOMEM ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
switch ( seq - > state ) {
case SEQ_STATE_UNINITIALIZED :
/* orphan event, do nothing */
2012-08-26 22:24:43 +04:00
return 0 ;
2010-04-21 16:23:54 +04:00
case SEQ_STATE_ACQUIRING :
2010-01-30 14:43:33 +03:00
break ;
2010-04-21 16:23:54 +04:00
case SEQ_STATE_RELEASED :
case SEQ_STATE_ACQUIRED :
case SEQ_STATE_READ_ACQUIRED :
case SEQ_STATE_CONTENDED :
/* broken lock sequence, discard it */
ls - > discard = 1 ;
2010-05-06 01:47:28 +04:00
bad_hist [ BROKEN_CONTENDED ] + + ;
2010-04-21 16:23:54 +04:00
list_del ( & seq - > list ) ;
free ( seq ) ;
goto end ;
2010-01-30 14:43:33 +03:00
default :
2010-04-21 16:23:54 +04:00
BUG_ON ( " Unknown state of lock sequence found! \n " ) ;
2010-01-30 14:43:33 +03:00
break ;
}
2010-04-21 16:23:54 +04:00
seq - > state = SEQ_STATE_CONTENDED ;
ls - > nr_contended + + ;
2013-09-09 06:19:19 +04:00
ls - > avg_wait_time = ls - > wait_time_total / ls - > nr_contended ;
2012-08-07 17:59:44 +04:00
seq - > prev_event_time = sample - > time ;
2010-04-21 16:23:54 +04:00
end :
2012-08-26 22:24:43 +04:00
return 0 ;
2010-01-30 14:43:33 +03:00
}
2012-09-24 17:52:12 +04:00
static int report_lock_release_event ( struct perf_evsel * evsel ,
struct perf_sample * sample )
2010-01-30 14:43:33 +03:00
{
2012-09-24 17:52:12 +04:00
void * addr ;
2010-04-21 16:23:54 +04:00
struct lock_stat * ls ;
struct thread_stat * ts ;
struct lock_seq_stat * seq ;
2012-09-24 17:52:12 +04:00
const char * name = perf_evsel__strval ( evsel , sample , " name " ) ;
u64 tmp = perf_evsel__intval ( evsel , sample , " lockdep_addr " ) ;
2010-01-30 14:43:33 +03:00
2012-09-24 17:52:12 +04:00
memcpy ( & addr , & tmp , sizeof ( void * ) ) ;
ls = lock_stat_findnew ( addr , name ) ;
2012-08-26 22:24:43 +04:00
if ( ! ls )
2013-09-09 06:19:14 +04:00
return - ENOMEM ;
2010-04-21 16:23:54 +04:00
if ( ls - > discard )
2012-08-26 22:24:43 +04:00
return 0 ;
2010-01-30 14:43:33 +03:00
2012-08-07 17:59:44 +04:00
ts = thread_stat_findnew ( sample - > tid ) ;
2012-08-26 22:24:43 +04:00
if ( ! ts )
2013-09-09 06:19:14 +04:00
return - ENOMEM ;
2012-08-26 22:24:43 +04:00
2012-09-24 17:52:12 +04:00
seq = get_seq ( ts , addr ) ;
2012-08-26 22:24:43 +04:00
if ( ! seq )
2013-09-09 06:19:14 +04:00
return - ENOMEM ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
switch ( seq - > state ) {
case SEQ_STATE_UNINITIALIZED :
goto end ;
case SEQ_STATE_ACQUIRED :
break ;
case SEQ_STATE_READ_ACQUIRED :
seq - > read_count - - ;
BUG_ON ( seq - > read_count < 0 ) ;
if ( ! seq - > read_count ) {
ls - > nr_release + + ;
2010-01-30 14:43:33 +03:00
goto end ;
}
2010-04-21 16:23:54 +04:00
break ;
case SEQ_STATE_ACQUIRING :
case SEQ_STATE_CONTENDED :
case SEQ_STATE_RELEASED :
/* broken lock sequence, discard it */
ls - > discard = 1 ;
2010-05-06 01:47:28 +04:00
bad_hist [ BROKEN_RELEASE ] + + ;
2010-04-21 16:23:54 +04:00
goto free_seq ;
2010-01-30 14:43:33 +03:00
default :
2010-04-21 16:23:54 +04:00
BUG_ON ( " Unknown state of lock sequence found! \n " ) ;
2010-01-30 14:43:33 +03:00
break ;
}
2010-04-21 16:23:54 +04:00
ls - > nr_release + + ;
free_seq :
list_del ( & seq - > list ) ;
free ( seq ) ;
2010-01-30 14:43:33 +03:00
end :
2012-08-26 22:24:43 +04:00
return 0 ;
2010-01-30 14:43:33 +03:00
}
/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
2010-01-31 10:27:58 +03:00
static struct trace_lock_handler report_lock_ops = {
. acquire_event = report_lock_acquire_event ,
. acquired_event = report_lock_acquired_event ,
. contended_event = report_lock_contended_event ,
. release_event = report_lock_release_event ,
2010-01-30 14:43:33 +03:00
} ;
static struct trace_lock_handler * trace_handler ;
2012-08-26 22:24:43 +04:00
static int perf_evsel__process_lock_acquire ( struct perf_evsel * evsel ,
2012-08-07 17:59:44 +04:00
struct perf_sample * sample )
2010-01-30 14:43:33 +03:00
{
2010-01-31 10:27:58 +03:00
if ( trace_handler - > acquire_event )
2012-09-24 17:52:12 +04:00
return trace_handler - > acquire_event ( evsel , sample ) ;
return 0 ;
2010-01-30 14:43:33 +03:00
}
2012-08-26 22:24:43 +04:00
static int perf_evsel__process_lock_acquired ( struct perf_evsel * evsel ,
2012-08-07 17:59:44 +04:00
struct perf_sample * sample )
2010-01-30 14:43:33 +03:00
{
2012-08-26 22:24:43 +04:00
if ( trace_handler - > acquired_event )
2012-09-24 17:52:12 +04:00
return trace_handler - > acquired_event ( evsel , sample ) ;
return 0 ;
2010-01-30 14:43:33 +03:00
}
2012-08-26 22:24:43 +04:00
static int perf_evsel__process_lock_contended ( struct perf_evsel * evsel ,
2012-09-24 17:52:12 +04:00
struct perf_sample * sample )
2010-01-30 14:43:33 +03:00
{
2012-08-26 22:24:43 +04:00
if ( trace_handler - > contended_event )
2012-09-24 17:52:12 +04:00
return trace_handler - > contended_event ( evsel , sample ) ;
return 0 ;
2010-01-30 14:43:33 +03:00
}
2012-08-26 22:24:43 +04:00
static int perf_evsel__process_lock_release ( struct perf_evsel * evsel ,
2012-09-24 17:52:12 +04:00
struct perf_sample * sample )
2010-01-30 14:43:33 +03:00
{
2012-08-26 22:24:43 +04:00
if ( trace_handler - > release_event )
2012-09-24 17:52:12 +04:00
return trace_handler - > release_event ( evsel , sample ) ;
return 0 ;
2010-01-30 14:43:33 +03:00
}
2010-05-06 01:47:28 +04:00
/*
 * Print the count and per-event-type histogram of broken lock
 * sequences. Prints nothing when no events were processed at all
 * (avoids a division by zero that rendered "bad rate: nan").
 */
static void print_bad_events(int bad, int total)
{
	/* Output for debug, this have to be removed */
	int i;
	const char *name[4] =
		{ "acquire", "acquired", "contended", "release" };

	if (!total)
		return;

	pr_info("\n=== output for debug===\n\n");
	pr_info("bad: %d, total: %d\n", bad, total);
	pr_info("bad rate: %.2f %%\n", (double)bad / (double)total * 100);
	pr_info("histogram of events caused bad sequence\n");
	for (i = 0; i < BROKEN_MAX; i++)
		pr_info(" %10s: %d\n", name[i], bad_hist[i]);
}
2010-01-30 14:43:33 +03:00
/* TODO: various way to print, coloring, nano or milli sec */
static void print_result ( void )
{
struct lock_stat * st ;
char cut_name [ 20 ] ;
2010-04-21 16:23:54 +04:00
int bad , total ;
2010-01-30 14:43:33 +03:00
2010-05-03 09:12:00 +04:00
pr_info ( " %20s " , " Name " ) ;
pr_info ( " %10s " , " acquired " ) ;
pr_info ( " %10s " , " contended " ) ;
2010-01-30 14:43:33 +03:00
2013-09-09 06:19:19 +04:00
pr_info ( " %15s " , " avg wait (ns) " ) ;
2010-05-03 09:12:00 +04:00
pr_info ( " %15s " , " total wait (ns) " ) ;
pr_info ( " %15s " , " max wait (ns) " ) ;
pr_info ( " %15s " , " min wait (ns) " ) ;
2010-01-30 14:43:33 +03:00
2010-05-03 09:12:00 +04:00
pr_info ( " \n \n " ) ;
2010-01-30 14:43:33 +03:00
2010-04-21 16:23:54 +04:00
bad = total = 0 ;
2010-01-30 14:43:33 +03:00
while ( ( st = pop_from_result ( ) ) ) {
2010-04-21 16:23:54 +04:00
total + + ;
if ( st - > discard ) {
bad + + ;
continue ;
}
2010-01-30 14:43:33 +03:00
bzero ( cut_name , 20 ) ;
if ( strlen ( st - > name ) < 16 ) {
/* output raw name */
2010-05-03 09:12:00 +04:00
pr_info ( " %20s " , st - > name ) ;
2010-01-30 14:43:33 +03:00
} else {
strncpy ( cut_name , st - > name , 16 ) ;
cut_name [ 16 ] = ' . ' ;
cut_name [ 17 ] = ' . ' ;
cut_name [ 18 ] = ' . ' ;
cut_name [ 19 ] = ' \0 ' ;
/* cut off name for saving output style */
2010-05-03 09:12:00 +04:00
pr_info ( " %20s " , cut_name ) ;
2010-01-30 14:43:33 +03:00
}
2010-05-03 09:12:00 +04:00
pr_info ( " %10u " , st - > nr_acquired ) ;
pr_info ( " %10u " , st - > nr_contended ) ;
2010-01-30 14:43:33 +03:00
2013-09-09 06:19:19 +04:00
pr_info ( " %15 " PRIu64 " " , st - > avg_wait_time ) ;
2011-01-23 01:37:02 +03:00
pr_info ( " %15 " PRIu64 " " , st - > wait_time_total ) ;
pr_info ( " %15 " PRIu64 " " , st - > wait_time_max ) ;
pr_info ( " %15 " PRIu64 " " , st - > wait_time_min = = ULLONG_MAX ?
2010-01-30 14:43:33 +03:00
0 : st - > wait_time_min ) ;
2010-05-03 09:12:00 +04:00
pr_info ( " \n " ) ;
2010-01-30 14:43:33 +03:00
}
2010-04-21 16:23:54 +04:00
2010-05-06 01:47:28 +04:00
print_bad_events ( bad , total ) ;
2010-01-30 14:43:33 +03:00
}
2010-05-17 22:51:10 +04:00
static bool info_threads , info_map ;
2010-05-03 09:12:00 +04:00
static void dump_threads ( void )
{
struct thread_stat * st ;
struct rb_node * node ;
struct thread * t ;
pr_info ( " %10s: comm \n " , " Thread ID " ) ;
node = rb_first ( & thread_stats ) ;
while ( node ) {
st = container_of ( node , struct thread_stat , rb ) ;
t = perf_session__findnew ( session , st - > tid ) ;
2013-09-11 16:46:56 +04:00
pr_info ( " %10d: %s \n " , st - > tid , thread__comm_str ( t ) ) ;
2010-05-03 09:12:00 +04:00
node = rb_next ( node ) ;
} ;
}
2010-01-30 14:43:33 +03:00
static void dump_map ( void )
{
unsigned int i ;
struct lock_stat * st ;
2010-05-03 09:12:00 +04:00
pr_info ( " Address of instance: name of class \n " ) ;
2010-01-30 14:43:33 +03:00
for ( i = 0 ; i < LOCKHASH_SIZE ; i + + ) {
list_for_each_entry ( st , & lockhash_table [ i ] , hash_entry ) {
2010-05-03 09:12:00 +04:00
pr_info ( " %p: %s \n " , st - > addr , st - > name ) ;
2010-01-30 14:43:33 +03:00
}
}
}
2012-08-26 22:24:43 +04:00
static int dump_info ( void )
2010-05-03 09:12:00 +04:00
{
2012-08-26 22:24:43 +04:00
int rc = 0 ;
2010-05-03 09:12:00 +04:00
if ( info_threads )
dump_threads ( ) ;
else if ( info_map )
dump_map ( ) ;
2012-08-26 22:24:43 +04:00
else {
rc = - 1 ;
pr_err ( " Unknown type of information \n " ) ;
}
return rc ;
2010-05-03 09:12:00 +04:00
}
2012-09-24 17:52:12 +04:00
typedef int ( * tracepoint_handler ) ( struct perf_evsel * evsel ,
struct perf_sample * sample ) ;
2012-09-11 02:15:03 +04:00
static int process_sample_event ( struct perf_tool * tool __maybe_unused ,
2011-11-25 14:19:45 +04:00
union perf_event * event ,
2011-03-15 21:44:01 +03:00
struct perf_sample * sample ,
2012-08-07 16:58:03 +04:00
struct perf_evsel * evsel ,
2011-11-28 13:56:39 +04:00
struct machine * machine )
2010-04-24 02:04:12 +04:00
{
2013-08-27 12:23:03 +04:00
struct thread * thread = machine__findnew_thread ( machine , sample - > pid ,
sample - > tid ) ;
2010-04-24 02:04:12 +04:00
if ( thread = = NULL ) {
pr_debug ( " problem processing %d event, skipping it. \n " ,
2011-01-29 19:01:45 +03:00
event - > header . type ) ;
2010-04-24 02:04:12 +04:00
return - 1 ;
}
2013-11-06 17:17:38 +04:00
if ( evsel - > handler ! = NULL ) {
tracepoint_handler f = evsel - > handler ;
2012-09-24 17:52:12 +04:00
return f ( evsel , sample ) ;
}
return 0 ;
2010-04-24 02:04:12 +04:00
}
2013-09-09 06:19:16 +04:00
static void sort_result ( void )
{
unsigned int i ;
struct lock_stat * st ;
for ( i = 0 ; i < LOCKHASH_SIZE ; i + + ) {
list_for_each_entry ( st , & lockhash_table [ i ] , hash_entry ) {
insert_to_result ( st , compare ) ;
}
}
}
2012-09-24 17:52:12 +04:00
/* tracepoint name -> handler mapping used by record validation and report */
static const struct perf_evsel_str_handler lock_tracepoints[] = {
	{ "lock:lock_acquire",	 perf_evsel__process_lock_acquire,   }, /* CONFIG_LOCKDEP */
	{ "lock:lock_acquired",	 perf_evsel__process_lock_acquired,  }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
	{ "lock:lock_contended", perf_evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
	{ "lock:lock_release",	 perf_evsel__process_lock_release,   }, /* CONFIG_LOCKDEP */
};
2013-09-09 06:19:16 +04:00
/*
 * Core of the 'report' and 'info' subcommands: open the perf.data
 * session, replay the lock tracepoints through the handlers, then
 * either dump info (@display_info) or print the sorted statistics.
 */
static int __cmd_report(bool display_info)
{
	int err = -EINVAL;
	struct perf_tool eops = {
		.sample		 = process_sample_event,
		.comm		 = perf_event__process_comm,
		.ordered_events	 = true,
	};
	struct perf_data_file file = {
		.path = input_name,
		.mode = PERF_DATA_MODE_READ,
	};

	session = perf_session__new(&file, false, &eops);
	if (!session) {
		pr_err("Initializing perf session failed\n");
		return -ENOMEM;
	}

	symbol__init();

	if (!perf_session__has_traces(session, "lock record"))
		goto out_delete;

	if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
		pr_err("Initializing perf session tracepoint handlers failed\n");
		goto out_delete;
	}

	if (select_key())
		goto out_delete;

	err = perf_session__process_events(session, &eops);
	if (err)
		goto out_delete;

	setup_pager();
	if (display_info) /* used for info subcommand */
		err = dump_info();
	else {
		sort_result();
		print_result();
	}

out_delete:
	perf_session__delete(session);
	return err;
}
static int __cmd_record ( int argc , const char * * argv )
{
2012-10-01 22:20:58 +04:00
const char * record_args [ ] = {
2013-06-05 15:37:21 +04:00
" record " , " -R " , " -m " , " 1024 " , " -c " , " 1 " ,
2012-10-01 22:20:58 +04:00
} ;
2013-09-09 06:19:15 +04:00
unsigned int rec_argc , i , j , ret ;
2010-01-30 14:43:33 +03:00
const char * * rec_argv ;
2012-08-09 20:35:37 +04:00
for ( i = 0 ; i < ARRAY_SIZE ( lock_tracepoints ) ; i + + ) {
2012-09-24 17:52:12 +04:00
if ( ! is_valid_tracepoint ( lock_tracepoints [ i ] . name ) ) {
2012-08-09 20:35:37 +04:00
pr_err ( " tracepoint %s is not enabled. "
" Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled? \n " ,
2012-09-24 17:52:12 +04:00
lock_tracepoints [ i ] . name ) ;
2012-08-09 20:35:37 +04:00
return 1 ;
}
}
2010-01-30 14:43:33 +03:00
rec_argc = ARRAY_SIZE ( record_args ) + argc - 1 ;
2012-08-09 20:35:37 +04:00
/* factor of 2 is for -e in front of each tracepoint */
rec_argc + = 2 * ARRAY_SIZE ( lock_tracepoints ) ;
2010-01-30 14:43:33 +03:00
2012-08-09 20:35:37 +04:00
rec_argv = calloc ( rec_argc + 1 , sizeof ( char * ) ) ;
2013-09-09 06:19:15 +04:00
if ( ! rec_argv )
2010-11-13 05:35:06 +03:00
return - ENOMEM ;
2010-01-30 14:43:33 +03:00
for ( i = 0 ; i < ARRAY_SIZE ( record_args ) ; i + + )
rec_argv [ i ] = strdup ( record_args [ i ] ) ;
2012-08-09 20:35:37 +04:00
for ( j = 0 ; j < ARRAY_SIZE ( lock_tracepoints ) ; j + + ) {
rec_argv [ i + + ] = " -e " ;
2012-09-24 17:52:12 +04:00
rec_argv [ i + + ] = strdup ( lock_tracepoints [ j ] . name ) ;
2012-08-09 20:35:37 +04:00
}
2010-01-30 14:43:33 +03:00
for ( j = 1 ; j < ( unsigned int ) argc ; j + + , i + + )
rec_argv [ i ] = argv [ j ] ;
BUG_ON ( i ! = rec_argc ) ;
2013-09-09 06:19:15 +04:00
ret = cmd_record ( i , rec_argv , NULL ) ;
free ( rec_argv ) ;
return ret ;
2010-01-30 14:43:33 +03:00
}
2012-09-11 02:15:03 +04:00
/*
 * Entry point for 'perf lock': parse the subcommand (record, report,
 * script, info) and delegate. Unknown subcommands print usage.
 */
int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const struct option info_options[] = {
	OPT_BOOLEAN('t', "threads", &info_threads,
		    "dump thread list in perf.data"),
	OPT_BOOLEAN('m', "map", &info_map,
		    "map of lock instances (address:name table)"),
	OPT_END()
	};
	const struct option lock_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
	OPT_END()
	};
	const struct option report_options[] = {
	OPT_STRING('k', "key", &sort_key, "acquired",
		    "key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
	/* TODO: type */
	OPT_END()
	};
	const char * const info_usage[] = {
		"perf lock info [<options>]",
		NULL
	};
	const char *const lock_subcommands[] = { "record", "report", "script",
						 "info", NULL };
	const char *lock_usage[] = {
		NULL,
		NULL
	};
	const char * const report_usage[] = {
		"perf lock report [<options>]",
		NULL
	};
	unsigned int i;
	int rc = 0;

	for (i = 0; i < LOCKHASH_SIZE; i++)
		INIT_LIST_HEAD(lockhash_table + i);

	argc = parse_options_subcommand(argc, argv, lock_options, lock_subcommands,
					lock_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(lock_usage, lock_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "report", 6)) {
		trace_handler = &report_lock_ops;
		if (argc) {
			argc = parse_options(argc, argv,
					     report_options, report_usage, 0);
			if (argc)
				usage_with_options(report_usage, report_options);
		}
		rc = __cmd_report(false);
	} else if (!strcmp(argv[0], "script")) {
		/* Aliased to 'perf script' */
		return cmd_script(argc, argv, prefix);
	} else if (!strcmp(argv[0], "info")) {
		if (argc) {
			argc = parse_options(argc, argv,
					     info_options, info_usage, 0);
			if (argc)
				usage_with_options(info_usage, info_options);
		}
		/* recycling report_lock_ops */
		trace_handler = &report_lock_ops;
		rc = __cmd_report(true);
	} else {
		usage_with_options(lock_usage, lock_options);
	}

	return rc;
}