2017-04-17 15:23:08 -03:00
# include <inttypes.h>
2012-09-03 11:53:06 +09:00
# include <math.h>
2013-01-31 23:31:11 +01:00
# include <linux/compiler.h>
2012-09-03 11:53:06 +09:00
# include "../util/hist.h"
# include "../util/util.h"
# include "../util/sort.h"
2013-01-22 18:09:34 +09:00
# include "../util/evsel.h"
2016-03-07 16:44:45 -03:00
# include "../util/evlist.h"
2012-09-03 11:53:06 +09:00
/* hist period print (hpp) functions */

/*
 * Invoke print callback @fn on @hpp, then advance hpp->buf/hpp->size by
 * the number of characters written so successive calls append.
 * Evaluates to @fn's return value (characters printed).
 */
#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})
2014-07-31 14:47:38 +09:00
/*
 * Print one stat field of @he into hpp->buf, either as a percentage of
 * the hists' total period or as the raw value.  For a group event,
 * additionally print one column per group member (taken from the
 * entry's pair list), zero-filling members that have no sample so the
 * columns stay aligned.  Returns the total number of characters
 * printed; hpp->buf/hpp->size are restored before returning.
 */
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	/* Save buffer state; hpp__call_print_fn() advances it as it prints. */
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (perf_evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->nr_members;

		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = perf_evsel__group_idx(evsel);
		}

		/* zero-fill any trailing members after the last pair seen */
		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill group members at last which have no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore original buf and size as it's where caller expects
	 * the result will be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;
	return ret;
}
2014-07-31 14:47:38 +09:00
/*
 * Front end for __hpp__fmt(): picks the field width, then delegates.
 * With a field separator set, alignment is irrelevant so a minimal
 * width of 1 is used.
 */
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int width;

	if (symbol_conf.field_sep)
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);

	width = fmt->user_len ?: fmt->len;

	/* reserve room: a space and a % sign for percents, a space otherwise */
	width -= fmt_percent ? 2 : 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, width,
			  print_fn, fmt_percent);
}
/*
 * Like hpp__fmt() but for the accumulated (children) column: when
 * callchain cumulation is disabled the column has no meaningful value,
 * so print a right-aligned "N/A" placeholder instead.
 */
int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int len;

	if (symbol_conf.cumulate_callchain)
		return hpp__fmt(fmt, hpp, he, get_field, fmtstr,
				print_fn, fmt_percent);

	len = fmt->user_len ?: fmt->len;
	return snprintf(hpp->buf, hpp->size, "%*s", len - 1, "N/A");
}
2014-03-03 14:14:03 +09:00
/* Three-way compare of two stat values: 1, -1 or 0 (memcmp-style). */
static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a == field_b)
		return 0;

	return field_a > field_b ? 1 : -1;
}
/*
 * Order two hist entries by a stat field.  When the entries tie on the
 * leader's value and event grouping is active, break the tie by
 * comparing the group members' values index by index (gathered from
 * each entry's pair list).  Returns >0, <0 or 0 like field_cmp().
 */
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *fields_a, *fields_b;

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!perf_evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->nr_members;
	fields_a = calloc(nr_members, sizeof(*fields_a));
	fields_b = calloc(nr_members, sizeof(*fields_b));

	/* On allocation failure fall through and return the tie (0). */
	if (!fields_a || !fields_b)
		goto out;

	/* Collect each member's value, indexed by its position in the group. */
	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	/* Index 0 is the group leader, already compared above. */
	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}
2013-10-30 16:06:59 +09:00
/*
 * Order two hist entries by an accumulated stat field.  Only meaningful
 * when callchain cumulation is on; otherwise entries compare equal.
 */
static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		/* Depth tie-break only applies within one thread's chains. */
		if (a->thread != b->thread || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;

		/* Caller-first ordering inverts the depth comparison. */
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}
2014-07-31 14:47:40 +09:00
/*
 * Column width for a built-in hpp format: the user-specified or default
 * field width, widened for event groups (one sub-column per member) and
 * never narrower than the column header text.
 */
static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	struct perf_evsel *evsel = hists_to_evsel(hists);
	int name_len = strlen(fmt->name);
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.event_group)
		len = max(len, evsel->nr_members * fmt->len);

	if (len < name_len)
		len = name_len;

	return len;
}
2014-07-31 14:47:40 +09:00
/* Print the column header: the format's name right-aligned to its width. */
static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);

	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}
2016-09-22 17:36:35 +02:00
/*
 * Colored percent printer used through hpp__call_print_fn().  The
 * variadic arguments must be, in this order: the field width (int) and
 * the percent value (double) -- matching how __hpp__fmt() passes them.
 * Return value is clamped so it never exceeds the buffer size.
 */
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	/* scnprintf-like clamp: report at most what fit in the buffer */
	return (ret >= ssize) ? (ssize - 1) : ret;
}
static int hpp_entry_scnprintf ( struct perf_hpp * hpp , const char * fmt , . . . )
{
va_list args ;
ssize_t ssize = hpp - > size ;
int ret ;
va_start ( args , fmt ) ;
ret = vsnprintf ( hpp - > buf , hpp - > size , fmt , args ) ;
va_end ( args ) ;
return ( ret > = ssize ) ? ( ssize - 1 ) : ret ;
}
2013-01-22 18:09:34 +09:00
/*
 * Generators for percentage columns backed by he->stat._field:
 * a getter, a colored printer, a plain printer and a sort routine.
 */
#define __HPP_COLOR_PERCENT_FN(_type, _field)				\
static u64 he_get_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",	\
			hpp_color_scnprintf, true);			\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",	\
			hpp_entry_scnprintf, true);			\
}

#define __HPP_SORT_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort(a, b, he_get_##_field);			\
}
2013-10-30 16:06:59 +09:00
/*
 * Same generators but for accumulated (children) percentage columns
 * backed by he->stat_acc->_field; printing goes through hpp__fmt_acc()
 * so "N/A" is shown when cumulation is disabled.
 */
#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)			\
static u64 he_get_acc_##_field(struct hist_entry *he)			\
{									\
	return he->stat_acc->_field;					\
}									\
									\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
			    hpp_color_scnprintf, true);			\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)			\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
			    hpp_entry_scnprintf, true);			\
}

#define __HPP_SORT_ACC_FN(_type, _field)				\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);		\
}
2013-01-22 18:09:34 +09:00
/*
 * Generators for raw-value columns (sample/period counts) printed as
 * unsigned decimal rather than percent.
 */
#define __HPP_ENTRY_RAW_FN(_type, _field)				\
static u64 he_get_raw_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64, \
			hpp_entry_scnprintf, false);			\
}

#define __HPP_SORT_RAW_FN(_type, _field)				\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort(a, b, he_get_raw_##_field);			\
}

/* Bundles: one invocation emits the full set of functions for a column. */
#define HPP_PERCENT_FNS(_type, _field)		\
__HPP_COLOR_PERCENT_FN(_type, _field)		\
__HPP_ENTRY_PERCENT_FN(_type, _field)		\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)	\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)	\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)	\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)		\
__HPP_ENTRY_RAW_FN(_type, _field)		\
__HPP_SORT_RAW_FN(_type, _field)
2012-09-03 11:53:06 +09:00
2014-07-31 14:47:40 +09:00
/* Instantiate the printer/sorter sets for each built-in column. */
HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)
2012-09-03 11:53:07 +09:00
2015-01-08 09:45:46 +09:00
/*
 * No-op comparison used for the ->cmp/->collapse callbacks of built-in
 * formats; their real ordering is done through ->sort instead.
 */
static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}
2016-01-18 10:24:04 +01:00
/* A format is a built-in hpp entry iff it uses hpp__header_fn. */
static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

/* Two built-in hpp entries are equal when they share the same idx. */
static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}
2016-01-18 10:24:01 +01:00
/* Initializer for a colored column (has both ->color and ->entry). */
#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

/* Initializer for a colored accumulated (children) column. */
#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

/* Initializer for a plain column (no ->color callback). */
#define HPP__PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}
2012-09-03 11:53:06 +09:00
/* Built-in column table, indexed by the PERF_HPP__* enum values. */
struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};

/* Global lists of output fields and sort keys; one header line by default. */
struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};
2013-01-22 18:09:34 +09:00
2012-09-03 11:53:06 +09:00
# undef HPP__COLOR_PRINT_FNS
2013-10-30 16:06:59 +09:00
# undef HPP__COLOR_ACC_PRINT_FNS
2012-09-03 11:53:06 +09:00
# undef HPP__PRINT_FNS
2013-01-22 18:09:34 +09:00
# undef HPP_PERCENT_FNS
2013-10-30 16:06:59 +09:00
# undef HPP_PERCENT_ACC_FNS
2013-01-22 18:09:34 +09:00
# undef HPP_RAW_FNS
# undef __HPP_HEADER_FN
# undef __HPP_WIDTH_FN
# undef __HPP_COLOR_PERCENT_FN
# undef __HPP_ENTRY_PERCENT_FN
2013-10-30 16:06:59 +09:00
# undef __HPP_COLOR_ACC_PERCENT_FN
# undef __HPP_ENTRY_ACC_PERCENT_FN
2013-01-22 18:09:34 +09:00
# undef __HPP_ENTRY_RAW_FN
2013-10-30 16:06:59 +09:00
# undef __HPP_SORT_FN
# undef __HPP_SORT_ACC_FN
# undef __HPP_SORT_RAW_FN
2013-01-22 18:09:34 +09:00
2012-10-04 21:49:39 +09:00
/*
 * Initialize the built-in format table and register the default output
 * columns according to the current symbol_conf settings.
 */
void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If user specified field order, no need to setup default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		/* "Overhead" is renamed to distinguish it from "Children" */
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}
2012-09-03 11:53:06 +09:00
2016-01-18 10:24:14 +01:00
/* Append @format to @list's output fields; list order is display order. */
void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

/* Append @format as the lowest-priority sort key of @list. */
void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

/* Prepend @format as the highest-priority sort key of @list. */
void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

/* Remove @format from whatever output-field list it is linked on. */
void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del(&format->list);
}
2013-12-16 16:55:13 +09:00
/*
 * Undo cumulation setup: remove the "Children" (accumulated overhead)
 * column from the output and rename the overhead column back from
 * "Self" to "Overhead".  A strict user-given field order is respected
 * and left untouched.
 */
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}
2016-01-18 10:24:03 +01:00
static bool fmt_equal ( struct perf_hpp_fmt * a , struct perf_hpp_fmt * b )
{
return a - > equal & & a - > equal ( a , b ) ;
}
2016-01-18 10:24:21 +01:00
/*
 * Make every sort key also appear as an output column, unless an equal
 * format is already registered as one.
 */
void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}
2016-01-18 10:24:21 +01:00
/*
 * Make every output column also act as a (low-priority) sort key,
 * unless an equal format is already registered as one.
 */
void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}
2016-01-18 10:24:21 +01:00
2016-01-18 10:24:09 +01:00
/* Release a format through its own ->free callback, if it has one. */
static void fmt_free(struct perf_hpp_fmt *fmt)
{
	if (fmt->free)
		fmt->free(fmt);
}
2016-01-18 10:24:21 +01:00
/*
 * Unlink and free every format on @list's output-field and sort-key
 * lists.  A format on both lists is freed only once: the first pass
 * removes it from both via list_del_init(), so the second pass never
 * sees it again.
 */
void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}
2012-09-03 11:53:08 +09:00
/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;	/* width callbacks don't read it */

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;	/* two-space column separator */

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}
2014-07-31 14:47:37 +09:00
2016-02-26 21:13:16 +09:00
/*
 * Total width of the leading overhead columns, i.e. every format up to
 * (but not including) the first sort or dynamic entry.
 */
unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;	/* width callbacks don't read it */

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;	/* two-space column separator */

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}
2014-07-31 14:47:37 +09:00
/* Restore a format's column width to its built-in default. */
void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	/* sort entries keep their own width bookkeeping */
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	/* dynamic entries have no fixed default width */
	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;		/* fits "100.00%" */
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}
2014-07-31 14:47:38 +09:00
2016-09-20 14:30:24 +09:00
/* Reset every column width of @hists, including hierarchy sub-lists. */
void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}
2014-07-31 14:47:38 +09:00
void perf_hpp__set_user_width ( const char * width_list_str )
{
struct perf_hpp_fmt * fmt ;
const char * ptr = width_list_str ;
2016-01-18 10:24:17 +01:00
perf_hpp_list__for_each_format ( & perf_hpp_list , fmt ) {
2014-07-31 14:47:38 +09:00
char * p ;
int len = strtol ( ptr , & p , 10 ) ;
fmt - > user_len = len ;
if ( * p = = ' , ' )
ptr = p + 1 ;
else
break ;
}
}
2016-03-07 16:44:45 -03:00
/*
 * Attach a copy of @fmt to the per-level hpp list of @hists, creating
 * the level node on first use.  Returns 0 on success, -1 on allocation
 * failure.
 */
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	/* reuse an existing node for this hierarchy level, if any */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	/* a level is shown if at least one of its formats is not skipped */
	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}
/*
 * For hierarchy mode, build each evsel's per-level format lists from
 * the sort keys in @list.  No-op unless report_hierarchy is enabled.
 * Returns 0 on success or a negative error from add_hierarchy_fmt().
 */
int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			/* dynamic entries may be defined only for some evsels */
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}