#ifndef __PERF_ANNOTATE_H
#define __PERF_ANNOTATE_H

#include <stdbool.h>
#include <stdint.h>
#include <linux/types.h>
#include "symbol.h"
#include "hist.h"
#include "sort.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include <pthread.h>
2012-04-19 17:16:27 +04:00
struct ins ;
2012-04-20 21:38:46 +04:00
struct ins_operands {
char * raw ;
2012-04-25 15:00:23 +04:00
struct {
2012-05-11 23:48:49 +04:00
char * raw ;
2012-04-25 15:00:23 +04:00
char * name ;
u64 addr ;
2012-05-11 23:48:49 +04:00
u64 offset ;
2012-04-25 15:00:23 +04:00
} target ;
2012-05-12 20:15:34 +04:00
union {
struct {
char * raw ;
char * name ;
u64 addr ;
} source ;
struct {
struct ins * ins ;
struct ins_operands * ops ;
} locked ;
} ;
2012-04-20 21:38:46 +04:00
} ;
/*
 * Per-instruction-class callbacks: free the parsed operands, parse them
 * from the raw string, and pretty-print the instruction into a buffer.
 */
struct ins_ops {
	void (*free)(struct ins_operands *ops);
	int (*parse)(struct ins_operands *ops, struct map *map);
	int (*scnprintf)(struct ins *ins, char *bf, size_t size,
			 struct ins_operands *ops);
};
/* One known instruction mnemonic and its parse/print callbacks. */
struct ins {
	const char	*name;
	struct ins_ops	*ops;
};
bool ins__is_jump(const struct ins *ins);
bool ins__is_call(const struct ins *ins);
bool ins__is_ret(const struct ins *ins);
int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops);
2013-03-05 09:53:30 +04:00
struct annotation ;
2012-04-15 22:24:39 +04:00
struct disasm_line {
2012-04-20 21:38:46 +04:00
struct list_head node ;
s64 offset ;
char * line ;
char * name ;
struct ins * ins ;
2014-11-13 05:05:26 +03:00
int line_nr ;
2015-07-18 18:24:50 +03:00
float ipc ;
u64 cycles ;
2012-04-20 21:38:46 +04:00
struct ins_operands ops ;
2011-02-04 14:45:46 +03:00
} ;
2012-04-25 21:16:03 +04:00
static inline bool disasm_line__has_offset ( const struct disasm_line * dl )
{
return dl - > ops . target . offset ! = UINT64_MAX ;
}
2012-04-15 22:24:39 +04:00
void disasm_line__free ( struct disasm_line * dl ) ;
struct disasm_line * disasm__get_next_ip_line ( struct list_head * head , struct disasm_line * pos ) ;
2012-05-08 01:54:16 +04:00
int disasm_line__scnprintf ( struct disasm_line * dl , char * bf , size_t size , bool raw ) ;
2012-04-15 22:52:18 +04:00
size_t disasm__fprintf ( struct list_head * head , FILE * fp ) ;
2013-03-05 09:53:30 +04:00
double disasm__calc_percent ( struct annotation * notes , int evidx , s64 offset ,
2015-06-19 22:10:43 +03:00
s64 end , const char * * path , u64 * nr_samples ) ;
2011-02-04 14:45:46 +03:00
struct sym_hist {
u64 sum ;
u64 addr [ 0 ] ;
} ;
2015-07-18 18:24:48 +03:00
struct cyc_hist {
u64 start ;
u64 cycles ;
u64 cycles_aggr ;
u32 num ;
u32 num_aggr ;
u8 have_start ;
/* 1 byte padding */
u16 reset ;
} ;
2015-06-19 22:36:12 +03:00
struct source_line_samples {
2011-02-04 14:45:46 +03:00
double percent ;
2012-11-09 09:58:49 +04:00
double percent_sum ;
2015-06-19 22:36:12 +03:00
double nr ;
2013-03-05 09:53:27 +04:00
} ;
struct source_line {
struct rb_node node ;
2011-02-04 14:45:46 +03:00
char * path ;
2013-03-05 09:53:28 +04:00
int nr_pcnt ;
2015-06-19 22:36:12 +03:00
struct source_line_samples samples [ 1 ] ;
2011-02-04 14:45:46 +03:00
} ;
2011-02-08 18:27:39 +03:00
/** struct annotated_source - symbols with hits have this attached as in sannotation
2011-02-04 18:43:24 +03:00
*
* @ histogram : Array of addr hit histograms per event being monitored
2011-02-08 18:27:39 +03:00
* @ lines : If ' print_lines ' is specified , per source code line percentages
2012-04-15 22:24:39 +04:00
* @ source : source parsed from a disassembler like objdump - dS
2015-07-18 18:24:48 +03:00
* @ cyc_hist : Average cycles per basic block
2011-02-04 18:43:24 +03:00
*
2011-02-08 18:27:39 +03:00
* lines is allocated , percentages calculated and all sorted by percentage
2011-02-04 18:43:24 +03:00
* when the annotation is about to be presented , so the percentages are for
* one of the entries in the histogram array , i . e . for the event / counter being
* presented . It is deallocated right after symbol__ { tui , tty , etc } _annotate
* returns .
*/
2011-02-08 18:27:39 +03:00
struct annotated_source {
struct list_head source ;
struct source_line * lines ;
2011-02-06 19:54:44 +03:00
int nr_histograms ;
2015-10-05 21:06:03 +03:00
size_t sizeof_sym_hist ;
2015-07-18 18:24:48 +03:00
struct cyc_hist * cycles_hist ;
2011-02-08 18:27:39 +03:00
struct sym_hist histograms [ 0 ] ;
} ;
struct annotation {
pthread_mutex_t lock ;
perf annotate: Add branch stack / basic block
I wanted to know the hottest path through a function and figured the
branch-stack (LBR) information should be able to help out with that.
The below uses the branch-stack to create basic blocks and generate
statistics from them.
from to branch_i
* ----> *
|
| block
v
* ----> *
from to branch_i+1
The blocks are broken down into non-overlapping ranges, while tracking
if the start of each range is an entry point and/or the end of a range
is a branch.
Each block iterates all ranges it covers (while splitting where required
to exactly match the block) and increments the 'coverage' count.
For the range including the branch we increment the taken counter, as
well as the pred counter if flags.predicted.
Using these number we can find if an instruction:
- had coverage; given by:
br->coverage / br->sym->max_coverage
This metric ensures each symbol has a 100% spot, which reflects the
observation that each symbol must have a most covered/hottest
block.
- is a branch target: br->is_target && br->start == add
- for targets, how much of a branch's coverages comes from it:
target->entry / branch->coverage
- is a branch: br->is_branch && br->end == addr
- for branches, how often it was taken:
br->taken / br->coverage
after all, all execution that didn't take the branch would have
incremented the coverage and continued onward to a later branch.
- for branches, how often it was predicted:
br->pred / br->taken
The coverage percentage is used to color the address and asm sections;
for low (<1%) coverage we use NORMAL (uncolored), indicating that these
instructions are not 'important'. For high coverage (>75%) we color the
address RED.
For each branch, we add an asm comment after the instruction with
information on how often it was taken and predicted.
Output looks like (sans color, which does loose a lot of the
information :/)
$ perf record --branch-filter u,any -e cycles:p ./branches 27
$ perf annotate branches
Percent | Source code & Disassembly of branches for cycles:pu (217 samples)
---------------------------------------------------------------------------------
: branches():
0.00 : 40057a: push %rbp
0.00 : 40057b: mov %rsp,%rbp
0.00 : 40057e: sub $0x20,%rsp
0.00 : 400582: mov %rdi,-0x18(%rbp)
0.00 : 400586: mov %rsi,-0x20(%rbp)
0.00 : 40058a: mov -0x18(%rbp),%rax
0.00 : 40058e: mov %rax,-0x10(%rbp)
0.00 : 400592: movq $0x0,-0x8(%rbp)
0.00 : 40059a: jmpq 400656 <branches+0xdc>
1.84 : 40059f: mov -0x10(%rbp),%rax # +100.00%
3.23 : 4005a3: and $0x1,%eax
1.84 : 4005a6: test %rax,%rax
0.00 : 4005a9: je 4005bf <branches+0x45> # -54.50% (p:42.00%)
0.46 : 4005ab: mov 0x200bbe(%rip),%rax # 601170 <acc>
12.90 : 4005b2: add $0x1,%rax
2.30 : 4005b6: mov %rax,0x200bb3(%rip) # 601170 <acc>
0.46 : 4005bd: jmp 4005d1 <branches+0x57> # -100.00% (p:100.00%)
0.92 : 4005bf: mov 0x200baa(%rip),%rax # 601170 <acc> # +49.54%
13.82 : 4005c6: sub $0x1,%rax
0.46 : 4005ca: mov %rax,0x200b9f(%rip) # 601170 <acc>
2.30 : 4005d1: mov -0x10(%rbp),%rax # +50.46%
0.46 : 4005d5: mov %rax,%rdi
0.46 : 4005d8: callq 400526 <lfsr> # -100.00% (p:100.00%)
0.00 : 4005dd: mov %rax,-0x10(%rbp) # +100.00%
0.92 : 4005e1: mov -0x18(%rbp),%rax
0.00 : 4005e5: and $0x1,%eax
0.00 : 4005e8: test %rax,%rax
0.00 : 4005eb: je 4005ff <branches+0x85> # -100.00% (p:100.00%)
0.00 : 4005ed: mov 0x200b7c(%rip),%rax # 601170 <acc>
0.00 : 4005f4: shr $0x2,%rax
0.00 : 4005f8: mov %rax,0x200b71(%rip) # 601170 <acc>
0.00 : 4005ff: mov -0x10(%rbp),%rax # +100.00%
7.37 : 400603: and $0x1,%eax
3.69 : 400606: test %rax,%rax
0.00 : 400609: jne 400612 <branches+0x98> # -59.25% (p:42.99%)
1.84 : 40060b: mov $0x1,%eax
14.29 : 400610: jmp 400617 <branches+0x9d> # -100.00% (p:100.00%)
1.38 : 400612: mov $0x0,%eax # +57.65%
10.14 : 400617: test %al,%al # +42.35%
0.00 : 400619: je 40062f <branches+0xb5> # -57.65% (p:100.00%)
0.46 : 40061b: mov 0x200b4e(%rip),%rax # 601170 <acc>
2.76 : 400622: sub $0x1,%rax
0.00 : 400626: mov %rax,0x200b43(%rip) # 601170 <acc>
0.46 : 40062d: jmp 400641 <branches+0xc7> # -100.00% (p:100.00%)
0.92 : 40062f: mov 0x200b3a(%rip),%rax # 601170 <acc> # +56.13%
2.30 : 400636: add $0x1,%rax
0.92 : 40063a: mov %rax,0x200b2f(%rip) # 601170 <acc>
0.92 : 400641: mov -0x10(%rbp),%rax # +43.87%
2.30 : 400645: mov %rax,%rdi
0.00 : 400648: callq 400526 <lfsr> # -100.00% (p:100.00%)
0.00 : 40064d: mov %rax,-0x10(%rbp) # +100.00%
1.84 : 400651: addq $0x1,-0x8(%rbp)
0.92 : 400656: mov -0x8(%rbp),%rax
5.07 : 40065a: cmp -0x20(%rbp),%rax
0.00 : 40065e: jb 40059f <branches+0x25> # -100.00% (p:100.00%)
0.00 : 400664: nop
0.00 : 400665: leaveq
0.00 : 400666: retq
(Note: the --branch-filter u,any was used to avoid spurious target and
branch points due to interrupts/faults, they show up as very small -/+
annotations on 'weird' locations)
Committer note:
Please take a look at:
http://vger.kernel.org/~acme/perf/annotate_basic_blocks.png
To see the colors.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: David Carrillo-Cisneros <davidcc@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
[ Moved sym->max_coverage to 'struct annotate', aka symbol__annotate(sym) ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-09-05 22:08:12 +03:00
u64 max_coverage ;
2011-02-08 18:27:39 +03:00
struct annotated_source * src ;
2011-02-04 14:45:46 +03:00
} ;
2011-02-04 18:43:24 +03:00
static inline struct sym_hist * annotation__histogram ( struct annotation * notes , int idx )
{
2011-02-08 18:27:39 +03:00
return ( ( ( void * ) & notes - > src - > histograms ) +
( notes - > src - > sizeof_sym_hist * idx ) ) ;
2011-02-04 18:43:24 +03:00
}
2011-02-04 14:45:46 +03:00
static inline struct annotation * symbol__annotation ( struct symbol * sym )
{
2015-01-14 14:18:05 +03:00
return ( void * ) sym - symbol_conf . priv_size ;
2011-02-04 14:45:46 +03:00
}
2013-12-18 23:48:29 +04:00
int addr_map_symbol__inc_samples ( struct addr_map_symbol * ams , int evidx ) ;
2015-07-18 18:24:48 +03:00
int addr_map_symbol__account_cycles ( struct addr_map_symbol * ams ,
struct addr_map_symbol * start ,
unsigned cycles ) ;
2013-12-19 00:10:15 +04:00
int hist_entry__inc_addr_samples ( struct hist_entry * he , int evidx , u64 addr ) ;
2011-11-12 04:17:32 +04:00
int symbol__alloc_hist ( struct symbol * sym ) ;
2011-02-06 19:54:44 +03:00
void symbol__annotate_zero_histograms ( struct symbol * sym ) ;
2011-02-04 14:45:46 +03:00
2016-07-29 22:44:56 +03:00
int symbol__disassemble ( struct symbol * sym , struct map * map , size_t privsize ) ;
2013-12-19 00:10:15 +04:00
/* Error codes returned by symbol__disassemble(), decoded by
 * symbol__strerror_disassemble().
 */
enum symbol_disassemble_errno {
	SYMBOL_ANNOTATE_ERRNO__SUCCESS		= 0,

	/*
	 * Choose an arbitrary negative big number not to clash with standard
	 * errno since SUS requires the errno has distinct positive values.
	 * See 'Issue 6' in the link below.
	 *
	 * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
	 */
	__SYMBOL_ANNOTATE_ERRNO__START		= -10000,

	SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX	= __SYMBOL_ANNOTATE_ERRNO__START,

	__SYMBOL_ANNOTATE_ERRNO__END,
};
int symbol__strerror_disassemble(struct symbol *sym, struct map *map,
				 int errnum, char *buf, size_t buflen);

int symbol__annotate_printf(struct symbol *sym, struct map *map,
			    struct perf_evsel *evsel, bool full_paths,
			    int min_pcnt, int max_lines, int context);
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
void disasm__purge(struct list_head *head);

bool ui__has_annotation(void);

int symbol__tty_annotate(struct symbol *sym, struct map *map,
			 struct perf_evsel *evsel, bool print_lines,
			 bool full_paths, int min_pcnt, int max_lines);
2013-09-30 14:07:11 +04:00
# ifdef HAVE_SLANG_SUPPORT
2013-03-05 09:53:21 +04:00
int symbol__tui_annotate ( struct symbol * sym , struct map * map ,
struct perf_evsel * evsel ,
2012-11-02 09:50:05 +04:00
struct hist_browser_timer * hbt ) ;
2012-09-28 13:32:02 +04:00
# else
2012-09-11 02:15:03 +04:00
static inline int symbol__tui_annotate ( struct symbol * sym __maybe_unused ,
2013-03-05 09:53:21 +04:00
struct map * map __maybe_unused ,
struct perf_evsel * evsel __maybe_unused ,
struct hist_browser_timer * hbt
__maybe_unused )
2011-02-04 14:45:46 +03:00
{
return 0 ;
}
# endif
/* objdump -M <style> override, settable from the command line. */
extern const char *disassembler_style;
#endif	/* __PERF_ANNOTATE_H */