2012-09-03 11:53:06 +09:00
# include <math.h>
2013-01-31 23:31:11 +01:00
# include <linux/compiler.h>
2012-09-03 11:53:06 +09:00
# include "../util/hist.h"
# include "../util/util.h"
# include "../util/sort.h"
2013-01-22 18:09:34 +09:00
# include "../util/evsel.h"
2012-09-03 11:53:06 +09:00
/* hist period print (hpp) functions */
2014-03-03 10:14:04 +09:00
/*
 * Invoke a hpp print callback and advance the hpp buffer cursor by the
 * number of bytes it produced, yielding that byte count as the value of
 * the whole expression (GCC statement-expression).
 */
#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __printed = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __printed);				\
	__printed;						\
})
2014-07-31 14:47:38 +09:00
/*
 * Emit a single zero-valued cell: "0.00%"-style when fmt_percent is set,
 * a raw 0 otherwise.  Used to pad group members that produced no sample.
 */
static int __hpp__fmt_print_zero(struct perf_hpp *hpp, hpp_snprint_fn print_fn,
				 const char *fmt, int len, bool fmt_percent)
{
	if (fmt_percent)
		return hpp__call_print_fn(hpp, print_fn, fmt, len, 0.0);

	return hpp__call_print_fn(hpp, print_fn, fmt, len, 0ULL);
}

/*
 * Format one hist entry field into hpp->buf via print_fn.
 *
 * When fmt_percent is set the field is rendered as a percentage of the
 * hists' total period, otherwise as the raw value.  For group events every
 * group member is printed in index order, zero-filling members that have
 * no sample (both gaps in the middle and trailing members).
 *
 * hpp->buf and hpp->size are advanced internally by hpp__call_print_fn and
 * restored before returning, since the caller expects the result at the
 * original buffer position.  Returns the total number of bytes written.
 */
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	char *saved_buf = hpp->buf;
	size_t saved_size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (perf_evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->nr_members;

		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			/* zero-fill group members in the middle which have no sample */
			while (idx_delta--)
				ret += __hpp__fmt_print_zero(hpp, print_fn, fmt,
							     len, fmt_percent);

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = perf_evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		/* zero-fill group members at last which have no sample */
		while (idx_delta--)
			ret += __hpp__fmt_print_zero(hpp, print_fn, fmt,
						     len, fmt_percent);
	}

	/*
	 * Restore original buf and size as it's where caller expects
	 * the result will be saved.
	 */
	hpp->buf = saved_buf;
	hpp->size = saved_size;

	return ret;
}
2014-07-31 14:47:38 +09:00
/*
 * Public wrapper around __hpp__fmt: computes the column width (user
 * override first, then the format's default) and accounts for the
 * trailing " %" when printing percentages.  With a field separator
 * configured, width is irrelevant and a minimal width of 1 is used.
 */
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int width = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);
	}

	/* 2 for a space and a % sign, otherwise just the leading space */
	width -= fmt_percent ? 2 : 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, width, print_fn, fmt_percent);
}
/*
 * Like hpp__fmt() but for accumulated (children) columns: when cumulation
 * is disabled the column is rendered as a right-aligned "N/A" instead.
 */
int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	if (symbol_conf.cumulate_callchain)
		return hpp__fmt(fmt, hpp, he, get_field, fmtstr,
				print_fn, fmt_percent);

	/* -1 leaves room for the column's leading space */
	return snprintf(hpp->buf, hpp->size, "%*s",
			(fmt->user_len ?: fmt->len) - 1, "N/A");
}
2014-03-03 14:14:03 +09:00
/* Three-way comparison of two u64 field values: 1, -1 or 0. */
static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a == field_b)
		return 0;

	return field_a > field_b ? 1 : -1;
}
/*
 * Compare two hist entries by a field.  Falls back to comparing the same
 * field of every group member (in index order) when the leader's values
 * tie and event grouping is active.  On allocation failure the leader
 * comparison result (a tie, i.e. 0) is returned.
 */
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *fields_a, *fields_b;

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!perf_evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->nr_members;
	fields_a = calloc(nr_members, sizeof(*fields_a));
	fields_b = calloc(nr_members, sizeof(*fields_b));

	if (!fields_a || !fields_b)
		goto out;

	/* scatter each pair's value into its group-index slot */
	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	/* index 0 is the leader, already compared above */
	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}
2013-10-30 16:06:59 +09:00
/*
 * Compare two hist entries by an accumulated field.  Only meaningful when
 * callchain cumulation is enabled; otherwise the entries are considered
 * equal.  On a value tie within the same thread, the entry with the
 * deeper callchain (the callee) sorts below its caller.
 */
static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (!symbol_conf.cumulate_callchain)
		return ret;

	/*
	 * Put caller above callee when they have equal period.
	 */
	ret = field_cmp(get_field(a), get_field(b));
	if (ret)
		return ret;

	if (a->thread != b->thread || !symbol_conf.use_callchain)
		return 0;

	return b->callchain->max_depth - a->callchain->max_depth;
}
2014-07-31 14:47:40 +09:00
/*
 * Column width: user override or the format default, widened to hold one
 * sub-column per group member when event grouping is on, and never
 * narrower than the column header text.
 */
static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct perf_evsel *evsel)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.event_group)
		len = max(len, evsel->nr_members * fmt->len);

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}
2014-07-31 14:47:40 +09:00
static int hpp__header_fn ( struct perf_hpp_fmt * fmt , struct perf_hpp * hpp ,
struct perf_evsel * evsel )
{
int len = hpp__width_fn ( fmt , hpp , evsel ) ;
return scnprintf ( hpp - > buf , hpp - > size , " %*s " , len , fmt - > name ) ;
2014-07-31 14:47:37 +09:00
}
2014-03-03 10:14:04 +09:00
static int hpp_color_scnprintf ( struct perf_hpp * hpp , const char * fmt , . . . )
{
va_list args ;
ssize_t ssize = hpp - > size ;
double percent ;
2014-07-31 14:47:36 +09:00
int ret , len ;
2014-03-03 10:14:04 +09:00
va_start ( args , fmt ) ;
2014-07-31 14:47:36 +09:00
len = va_arg ( args , int ) ;
2014-03-03 10:14:04 +09:00
percent = va_arg ( args , double ) ;
2014-07-31 14:47:36 +09:00
ret = percent_color_len_snprintf ( hpp - > buf , hpp - > size , fmt , len , percent ) ;
2014-03-03 10:14:04 +09:00
va_end ( args ) ;
return ( ret > = ssize ) ? ( ssize - 1 ) : ret ;
}
static int hpp_entry_scnprintf ( struct perf_hpp * hpp , const char * fmt , . . . )
{
va_list args ;
ssize_t ssize = hpp - > size ;
int ret ;
va_start ( args , fmt ) ;
ret = vsnprintf ( hpp - > buf , hpp - > size , fmt , args ) ;
va_end ( args ) ;
return ( ret > = ssize ) ? ( ssize - 1 ) : ret ;
}
2013-01-22 18:09:34 +09:00
/*
 * Generate the field accessor and the colored percentage entry callback
 * for one hist_entry stat field.
 */
#define __HPP_COLOR_PERCENT_FN(_type, _field)				\
static u64 he_get_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, "%*.2f%%",	\
			hpp_color_scnprintf, true);			\
}
2013-01-22 18:09:34 +09:00
/* Generate the uncolored percentage entry callback for one stat field. */
#define __HPP_ENTRY_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, "%*.2f%%",	\
			hpp_entry_scnprintf, true);			\
}
2014-03-03 10:59:57 +09:00
/* Generate the sort callback for one stat field. */
#define __HPP_SORT_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort(a, b, he_get_##_field);			\
}
2013-10-30 16:06:59 +09:00
/*
 * Generate the accumulated-stat accessor and the colored percentage entry
 * callback for one accumulated (children) field.
 */
#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)			\
static u64 he_get_acc_##_field(struct hist_entry *he)			\
{									\
	return he->stat_acc->_field;					\
}									\
									\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, "%*.2f%%", \
			    hpp_color_scnprintf, true);			\
}
/* Generate the uncolored percentage entry callback for an accumulated field. */
#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)			\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, "%*.2f%%", \
			    hpp_entry_scnprintf, true);			\
}
/* Generate the sort callback for an accumulated field. */
#define __HPP_SORT_ACC_FN(_type, _field)				\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);		\
}
2013-01-22 18:09:34 +09:00
/*
 * Generate the raw-value accessor and entry callback for one stat field
 * (printed as a plain unsigned integer rather than a percentage).
 */
#define __HPP_ENTRY_RAW_FN(_type, _field)				\
static u64 he_get_raw_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he) \
{									\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, "%*" PRIu64, \
			hpp_entry_scnprintf, false);			\
}
2014-03-03 10:59:57 +09:00
/* Generate the sort callback for a raw-value field. */
#define __HPP_SORT_RAW_FN(_type, _field)				\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
				 struct hist_entry *a, struct hist_entry *b) \
{									\
	return __hpp__sort(a, b, he_get_raw_##_field);			\
}
2014-07-31 14:47:40 +09:00
/*
 * Bundle generators: each expands to the full set of callbacks (color,
 * entry, sort) needed by one column kind.
 */
#define HPP_PERCENT_FNS(_type, _field)		\
__HPP_COLOR_PERCENT_FN(_type, _field)		\
__HPP_ENTRY_PERCENT_FN(_type, _field)		\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)	\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)	\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)	\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)		\
__HPP_ENTRY_RAW_FN(_type, _field)		\
__HPP_SORT_RAW_FN(_type, _field)
2012-09-03 11:53:06 +09:00
2014-07-31 14:47:40 +09:00
/* Instantiate the callbacks for every built-in column. */
HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)
2012-09-03 11:53:07 +09:00
2015-01-08 09:45:46 +09:00
/* No-op comparator: hpp columns never affect hist-entry collapsing. */
static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}
2016-01-18 10:24:01 +01:00
/* Initializer for a colored percentage column in perf_hpp__format[]. */
#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name     = _name,		\
		.header   = hpp__header_fn,	\
		.width    = hpp__width_fn,	\
		.color    = hpp__color_ ## _fn,	\
		.entry    = hpp__entry_ ## _fn,	\
		.cmp      = hpp__nop_cmp,	\
		.collapse = hpp__nop_cmp,	\
		.sort     = hpp__sort_ ## _fn,	\
		.idx      = PERF_HPP__ ## _idx,	\
	}
2012-09-03 11:53:06 +09:00
2016-01-18 10:24:01 +01:00
/* Initializer for a colored accumulated (children) column. */
#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name     = _name,			\
		.header   = hpp__header_fn,		\
		.width    = hpp__width_fn,		\
		.color    = hpp__color_ ## _fn,		\
		.entry    = hpp__entry_ ## _fn,		\
		.cmp      = hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,		\
		.sort     = hpp__sort_ ## _fn,		\
		.idx      = PERF_HPP__ ## _idx,		\
	}
2016-01-18 10:24:01 +01:00
/* Initializer for a plain (uncolored) column. */
#define HPP__PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name     = _name,		\
		.header   = hpp__header_fn,	\
		.width    = hpp__width_fn,	\
		.entry    = hpp__entry_ ## _fn,	\
		.cmp      = hpp__nop_cmp,	\
		.collapse = hpp__nop_cmp,	\
		.sort     = hpp__sort_ ## _fn,	\
		.idx      = PERF_HPP__ ## _idx,	\
	}
2012-09-03 11:53:06 +09:00
struct perf_hpp_fmt perf_hpp__format [ ] = {
2016-01-18 10:24:01 +01:00
HPP__COLOR_PRINT_FNS ( " Overhead " , overhead , OVERHEAD ) ,
HPP__COLOR_PRINT_FNS ( " sys " , overhead_sys , OVERHEAD_SYS ) ,
HPP__COLOR_PRINT_FNS ( " usr " , overhead_us , OVERHEAD_US ) ,
HPP__COLOR_PRINT_FNS ( " guest sys " , overhead_guest_sys , OVERHEAD_GUEST_SYS ) ,
HPP__COLOR_PRINT_FNS ( " guest usr " , overhead_guest_us , OVERHEAD_GUEST_US ) ,
HPP__COLOR_ACC_PRINT_FNS ( " Children " , overhead_acc , OVERHEAD_ACC ) ,
HPP__PRINT_FNS ( " Samples " , samples , SAMPLES ) ,
HPP__PRINT_FNS ( " Period " , period , PERIOD )
2012-09-03 11:53:06 +09:00
} ;
2012-10-13 00:06:16 +02:00
/* Active output columns and active sort keys, in display/sort order. */
LIST_HEAD(perf_hpp__list);
LIST_HEAD(perf_hpp__sort_list);
2012-10-13 00:06:16 +02:00
2013-01-22 18:09:34 +09:00
2012-09-03 11:53:06 +09:00
/* The generator macros are only needed above; keep the namespace clean. */
#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN
2013-01-22 18:09:34 +09:00
2012-10-04 21:49:39 +09:00
void perf_hpp__init ( void )
2012-09-03 11:53:06 +09:00
{
2014-03-03 16:16:20 +09:00
int i ;
for ( i = 0 ; i < PERF_HPP__MAX_INDEX ; i + + ) {
2014-03-04 09:06:42 +09:00
struct perf_hpp_fmt * fmt = & perf_hpp__format [ i ] ;
INIT_LIST_HEAD ( & fmt - > list ) ;
/* sort_list may be linked by setup_sorting() */
if ( fmt - > sort_list . next = = NULL )
INIT_LIST_HEAD ( & fmt - > sort_list ) ;
2014-03-03 16:16:20 +09:00
}
2014-03-04 10:46:34 +09:00
/*
* If user specified field order , no need to setup default fields .
*/
2014-08-22 15:58:38 +02:00
if ( is_strict_order ( field_order ) )
2014-03-04 10:46:34 +09:00
return ;
2013-10-30 16:06:59 +09:00
if ( symbol_conf . cumulate_callchain ) {
2015-10-06 14:25:12 +02:00
hpp_dimension__add_output ( PERF_HPP__OVERHEAD_ACC ) ;
2014-07-31 14:47:40 +09:00
perf_hpp__format [ PERF_HPP__OVERHEAD ] . name = " Self " ;
2013-10-30 16:06:59 +09:00
}
2015-10-06 14:25:12 +02:00
hpp_dimension__add_output ( PERF_HPP__OVERHEAD ) ;
2013-01-31 23:34:25 +01:00
2012-09-03 11:53:06 +09:00
if ( symbol_conf . show_cpu_utilization ) {
2015-10-06 14:25:12 +02:00
hpp_dimension__add_output ( PERF_HPP__OVERHEAD_SYS ) ;
hpp_dimension__add_output ( PERF_HPP__OVERHEAD_US ) ;
2012-09-03 11:53:06 +09:00
if ( perf_guest ) {
2015-10-06 14:25:12 +02:00
hpp_dimension__add_output ( PERF_HPP__OVERHEAD_GUEST_SYS ) ;
hpp_dimension__add_output ( PERF_HPP__OVERHEAD_GUEST_US ) ;
2012-09-03 11:53:06 +09:00
}
}
if ( symbol_conf . show_nr_samples )
2015-10-06 14:25:12 +02:00
hpp_dimension__add_output ( PERF_HPP__SAMPLES ) ;
2012-09-03 11:53:06 +09:00
if ( symbol_conf . show_total_period )
2015-10-06 14:25:12 +02:00
hpp_dimension__add_output ( PERF_HPP__PERIOD ) ;
2012-10-04 21:49:39 +09:00
}
2012-09-03 11:53:06 +09:00
2012-10-13 00:06:16 +02:00
void perf_hpp__column_register ( struct perf_hpp_fmt * format )
{
list_add_tail ( & format - > list , & perf_hpp__list ) ;
}
2013-12-16 16:55:13 +09:00
void perf_hpp__column_unregister ( struct perf_hpp_fmt * format )
{
list_del ( & format - > list ) ;
}
2014-03-03 11:46:55 +09:00
void perf_hpp__register_sort_field ( struct perf_hpp_fmt * format )
{
list_add_tail ( & format - > sort_list , & perf_hpp__sort_list ) ;
}
2012-10-13 00:06:16 +02:00
void perf_hpp__column_enable ( unsigned col )
2012-10-04 21:49:39 +09:00
{
BUG_ON ( col > = PERF_HPP__MAX_INDEX ) ;
2012-10-13 00:06:16 +02:00
perf_hpp__column_register ( & perf_hpp__format [ col ] ) ;
2012-09-03 11:53:06 +09:00
}
2013-12-16 16:55:13 +09:00
/* Unregister one of the built-in columns by its PERF_HPP__* index. */
void perf_hpp__column_disable(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	perf_hpp__column_unregister(&perf_hpp__format[col]);
}
void perf_hpp__cancel_cumulate ( void )
{
2014-08-22 15:58:38 +02:00
if ( is_strict_order ( field_order ) )
2014-03-20 09:10:29 +09:00
return ;
2013-12-16 16:55:13 +09:00
perf_hpp__column_disable ( PERF_HPP__OVERHEAD_ACC ) ;
2014-07-31 14:47:40 +09:00
perf_hpp__format [ PERF_HPP__OVERHEAD ] . name = " Overhead " ;
2013-12-16 16:55:13 +09:00
}
2014-03-03 16:16:20 +09:00
/*
 * Make every sort key also appear as an output column, unless it is
 * already registered (directly, or — for dynamically created sort
 * entries — via another format sharing the same sort entry).
 */
void perf_hpp__setup_output_field(void)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp__for_each_sort_list(fmt) {
		if (!list_empty(&fmt->list))
			continue;

		/*
		 * sort entry fields are dynamically created,
		 * so they can share a same sort key even though
		 * the list is empty.
		 */
		if (perf_hpp__is_sort_entry(fmt)) {
			struct perf_hpp_fmt *pos;

			perf_hpp__for_each_format(pos) {
				if (perf_hpp__same_sort_entry(pos, fmt))
					goto next;
			}
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}
/*
 * Mirror of perf_hpp__setup_output_field(): make every output column also
 * act as a sort key, skipping formats whose sort entry is already covered.
 */
void perf_hpp__append_sort_keys(void)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp__for_each_format(fmt) {
		if (!list_empty(&fmt->sort_list))
			continue;

		/*
		 * sort entry fields are dynamically created,
		 * so they can share a same sort key even though
		 * the list is empty.
		 */
		if (perf_hpp__is_sort_entry(fmt)) {
			struct perf_hpp_fmt *pos;

			perf_hpp__for_each_sort_list(pos) {
				if (perf_hpp__same_sort_entry(pos, fmt))
					goto next;
			}
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}
2014-05-07 18:42:24 +09:00
/*
 * Detach every format from both the output-column list and the sort-key
 * list, re-initializing each node so it can be linked again later.
 */
void perf_hpp__reset_output_field(void)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp__for_each_format_safe(fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
	}

	/* reset sort keys */
	perf_hpp__for_each_sort_list_safe(fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
	}
}
2012-09-03 11:53:08 +09:00
/*
* See hists__fprintf to match the column widths
*/
unsigned int hists__sort_list_width ( struct hists * hists )
{
2012-10-13 00:06:16 +02:00
struct perf_hpp_fmt * fmt ;
2014-05-19 14:19:30 +09:00
int ret = 0 ;
bool first = true ;
2014-03-10 16:43:52 +09:00
struct perf_hpp dummy_hpp ;
2012-09-03 11:53:08 +09:00
2012-10-13 00:06:16 +02:00
perf_hpp__for_each_format ( fmt ) {
2015-12-23 02:07:08 +09:00
if ( perf_hpp__should_skip ( fmt , hists ) )
2014-05-19 14:19:30 +09:00
continue ;
if ( first )
first = false ;
else
2012-09-03 11:53:08 +09:00
ret + = 2 ;
2014-03-10 16:43:52 +09:00
ret + = fmt - > width ( fmt , & dummy_hpp , hists_to_evsel ( hists ) ) ;
2012-09-03 11:53:08 +09:00
}
2014-05-19 14:19:30 +09:00
if ( verbose & & sort__has_sym ) /* Addr + origin */
2012-09-03 11:53:08 +09:00
ret + = 3 + BITS_PER_LONG / 4 ;
return ret ;
}
2014-07-31 14:47:37 +09:00
void perf_hpp__reset_width ( struct perf_hpp_fmt * fmt , struct hists * hists )
{
if ( perf_hpp__is_sort_entry ( fmt ) )
return perf_hpp__reset_sort_width ( fmt , hists ) ;
2016-01-18 10:24:02 +01:00
BUG_ON ( fmt - > idx > = PERF_HPP__MAX_INDEX ) ;
2014-07-31 14:47:37 +09:00
2016-01-18 10:24:02 +01:00
switch ( fmt - > idx ) {
2014-07-31 14:47:37 +09:00
case PERF_HPP__OVERHEAD :
case PERF_HPP__OVERHEAD_SYS :
case PERF_HPP__OVERHEAD_US :
case PERF_HPP__OVERHEAD_ACC :
fmt - > len = 8 ;
break ;
case PERF_HPP__OVERHEAD_GUEST_SYS :
case PERF_HPP__OVERHEAD_GUEST_US :
fmt - > len = 9 ;
break ;
case PERF_HPP__SAMPLES :
case PERF_HPP__PERIOD :
fmt - > len = 12 ;
break ;
default :
break ;
}
}
2014-07-31 14:47:38 +09:00
/*
 * Apply a comma-separated list of user column widths (e.g. "8,10,12") to
 * the registered output columns, in order.  Parsing stops at the first
 * entry not followed by a comma; remaining columns keep their defaults.
 */
void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *cursor = width_list_str;

	perf_hpp__for_each_format(fmt) {
		char *end;

		fmt->user_len = strtol(cursor, &end, 10);

		if (*end != ',')
			break;

		cursor = end + 1;
	}
}