#include <stdio.h>

#include "../../util/util.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
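
/* Print the base indentation, then 'left_margin' additional spaces. */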
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, "            ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}
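
/*
 * Print one spacer line of the callchain graph: a '|' for every depth level
 * still marked in depth_mask, blanks for the others.
 */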
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|          ");
		else
			ret += fprintf(fp, "           ");

	ret += fprintf(fp, "\n");

	return ret;
}
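
/*
 * Print one callchain entry: the '|' columns of the enclosing levels, the
 * node's value (between "--" markers) in front of the first entry of a
 * branch, and finally the resolved symbol name.
 */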
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024];

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", "          ");
	}
	fputs(callchain_list__sym_name(chain, bf, sizeof(bf), false), fp);
	fputc('\n', fp);
	return ret;
}
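
/*
 * Pseudo "[...]" entry used to account for the remaining (filtered) hits in
 * relative graph mode.
 */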
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}
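
/*
 * Recursively print one level of the callchain graph and then descend into
 * each child, honouring callchain_param.print_limit.
 */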
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth, except when there
		 * are remaining filtered hits: they will supersede the last
		 * child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}
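
/* Top level printer for graph (absolute/relative) callchain output. */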
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	/*
	 * If there is a single callchain root, don't bother printing
	 * its percentage (100% in fractal mode, and the same percentage
	 * as the hist entry in graph mode). This also avoids one level
	 * of columns.
	 */
	node = rb_first(root);
	if (node && !rb_next(node)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same
			 * as the symbol, so there is no need to print it:
			 * otherwise it would appear twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && !prefixcmp(sort_order, "sym"))
				continue;

			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s\n", callchain_list__sym_name(chain, bf, sizeof(bf),
									    false));

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		root = &cnode->rb_root;
	}

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	ret += fprintf(fp, "\n");

	return ret;
}
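
/*
 * Print a flat callchain; the recursion on node->parent makes the output run
 * from the root down to 'node'.
 */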
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];

	if (!node)
		return 0;

	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);

	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "                %s\n", callchain_list__sym_name(chain,
							bf, sizeof(bf), false));
	}

	return ret;
}
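
/* Flat mode: print each callchain value followed by its symbols. */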
static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += fprintf(fp, "        ");
		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, "\n");
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}
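
/*
 * Folded mode: print a whole callchain on a single line, with symbols joined
 * by symbol_conf.field_sep (';' by default).
 */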
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = symbol_conf.field_sep ?: ";";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (!node)
		return 0;

	ret += __callchain__fprintf_folded(fp, node->parent);

	first = (ret == 0);
	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain,
							bf, sizeof(bf), false));
		first = false;
	}

	return ret;
}
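
/* Print every callchain of the tree in folded form, one chain per line. */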
static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
					u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, " ");
		ret += __callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}
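
/* Dispatch callchain printing according to callchain_param.mode. */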
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain,
						symbol_conf.cumulate_callchain ?
						he->stat_acc->period : he->stat.period,
						left_margin);
		break;
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						left_margin);
		break;
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_FOLDED:
		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}
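
/*
 * Compute the left margin needed to align callchains under the 'comm' column
 * (when sorting by comm), then print the entry's callchains.
 */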
static size_t hist_entry__callchain_fprintf(struct hist_entry *he,
					    struct hists *hists,
					    FILE *fp)
{
	int left_margin = 0;
	u64 total_period = hists->stats.total_period;

	if (field_order == NULL && (sort_order == NULL ||
				    !prefixcmp(sort_order, "comm"))) {
		struct perf_hpp_fmt *fmt;

		perf_hpp__for_each_format(fmt) {
			if (!perf_hpp__is_sort_entry(fmt))
				continue;

			/* must be 'comm' sort entry */
			left_margin = fmt->width(fmt, NULL, hists_to_evsel(hists));
			left_margin -= thread__comm_len(he->thread);
			break;
		}
	}

	return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
}
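
/* Format a single hist entry line into hpp->buf using the configured formats. */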
static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp__for_each_format(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial '  '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}
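
/* Print one formatted hist entry line, plus its callchains when enabled. */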
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       struct hists *hists,
			       char *bf, size_t bfsz, FILE *fp)
{
	int ret;
	struct perf_hpp hpp = {
		.buf	= bf,
		.size	= size,
	};

	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (symbol_conf.use_callchain)
		ret += hist_entry__callchain_fprintf(he, hists, fp);

	return ret;
}
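
/*
 * Print the whole hists tree: optional header lines, then one line per
 * unfiltered entry above min_pcnt, honouring the max_rows/max_cols limits.
 */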
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	char bf[96];
	struct perf_hpp dummy_hpp = {
		.buf	= bf,
		.size	= sizeof(bf),
	};
	bool first = true;
	size_t linesz;
	char *line = NULL;

	init_rem_hits();

	perf_hpp__for_each_format(fmt)
		perf_hpp__reset_width(fmt, hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (!show_header)
		goto print_entries;

	fprintf(fp, "# ");

	perf_hpp__for_each_format(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: "  ");
		else
			first = false;

		fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
		fprintf(fp, "%s", bf);
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	if (sep)
		goto print_entries;

	first = true;

	fprintf(fp, "# ");

	perf_hpp__for_each_format(fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: "  ");
		else
			first = false;

		width = fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	fprintf(fp, "#\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

print_entries:
	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, hists, line, linesz, fp);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}
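
/* Print a counter for each PERF_RECORD_* event type that was seen. */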
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (stats->nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       stats->nr_events[i]);
	}

	return ret;
}