2019-05-29 17:12:25 +03:00
// SPDX-License-Identifier: GPL-2.0-only
2011-02-04 14:45:46 +03:00
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-annotate.c, see those files for further
 * copyright notes.
*/
2017-04-18 16:46:11 +03:00
#include <errno.h>
2017-04-17 21:23:08 +03:00
#include <inttypes.h>
2019-01-22 16:14:55 +03:00
#include <libgen.h>
2019-08-30 20:45:20 +03:00
#include <stdlib.h>
2019-09-03 16:56:06 +03:00
# include "util.h" // hex_width()
2014-02-20 05:32:53 +04:00
# include "ui/ui.h"
# include "sort.h"
2011-02-04 14:45:46 +03:00
# include "build-id.h"
# include "color.h"
2018-03-16 20:33:38 +03:00
# include "config.h"
2019-08-30 15:43:25 +03:00
# include "dso.h"
2019-08-30 20:45:20 +03:00
# include "env.h"
2019-01-27 15:42:37 +03:00
# include "map.h"
2019-11-26 04:24:10 +03:00
# include "maps.h"
2011-02-04 14:45:46 +03:00
# include "symbol.h"
2019-08-22 23:10:08 +03:00
# include "srcline.h"
2018-04-03 21:19:47 +03:00
# include "units.h"
2011-02-04 14:45:46 +03:00
# include "debug.h"
# include "annotate.h"
2013-03-05 09:53:21 +04:00
# include "evsel.h"
2018-05-24 23:20:53 +03:00
# include "evlist.h"
perf annotate: Enable annotation of BPF programs
In symbol__disassemble(), a DSO_BINARY_TYPE__BPF_PROG_INFO dso calls into
a new function, symbol__disassemble_bpf(), where annotation line
information is filled in based on the bpf_prog_info and btf data saved in
the given perf_env.
symbol__disassemble_bpf() uses binutils's libopcodes to disassemble bpf
programs.
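For reference, a minimal sketch of driving libopcodes over a JITed buffer,
in the spirit of symbol__disassemble_bpf(); the function and buffer names
are illustrative, and the init_disassemble_info()/disassembler() signatures
vary across binutils versions (the four-argument disassembler() shown here
assumes a recent one):
	#include <dis-asm.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Disassemble 'len' bytes at 'buf', pretending they live at address 'pc'. */
	static void disasm_prog_sketch(bfd *bfdf, void *buf, size_t len, uint64_t pc)
	{
		struct disassemble_info info;
		disassembler_ftype disassemble;
		int count, pos = 0;

		init_disassemble_info(&info, stdout, (fprintf_ftype) fprintf);
		info.arch	   = bfd_get_arch(bfdf);
		info.mach	   = bfd_get_mach(bfdf);
		info.buffer	   = buf;
		info.buffer_length = len;
		info.buffer_vma	   = pc;
		disassemble_init_for_target(&info);

		disassemble = disassembler(info.arch, bfd_big_endian(bfdf),
					   info.mach, bfdf);
		do {
			count = disassemble(pc + pos, &info); /* prints one insn */
			printf("\n");
			pos += count;
		} while (count > 0 && pos < (int)len);
	}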
Committer testing:
After fixing this:
- u64 *addrs = (u64 *)(info_linear->info.jited_ksyms);
+ u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
Detected when crossbuilding to a 32-bit arch.
And making all this dependent on HAVE_LIBBFD_SUPPORT and
HAVE_LIBBPF_SUPPORT:
1) Have a BPF program running, one that has BTF info, etc. I used
the tools/perf/examples/bpf/augmented_raw_syscalls.c put in place
by 'perf trace'.
# grep -B1 augmented_raw ~/.perfconfig
[trace]
add_events = /home/acme/git/perf/tools/perf/examples/bpf/augmented_raw_syscalls.c
#
# perf trace -e *mmsg
dnf/6245 sendmmsg(20, 0x7f5485a88030, 2, MSG_NOSIGNAL) = 2
NetworkManager/10055 sendmmsg(22<socket:[1056822]>, 0x7f8126ad1bb0, 2, MSG_NOSIGNAL) = 2
2) Then do a 'perf record' system wide for a while:
# perf record -a
^C[ perf record: Woken up 68 times to write data ]
[ perf record: Captured and wrote 19.427 MB perf.data (366891 samples) ]
#
3) Check that we captured BPF and BTF info in the perf.data file:
# perf report --header-only | grep 'b[pt]f'
# event : name = cycles:ppp, , id = { 294789, 294790, 294791, 294792, 294793, 294794, 294795, 294796 }, size = 112, { sample_period, sample_freq } = 4000, sample_type = IP|TID|TIME|CPU|PERIOD, read_format = ID, disabled = 1, inherit = 1, mmap = 1, comm = 1, freq = 1, task = 1, precise_ip = 3, sample_id_all = 1, exclude_guest = 1, mmap2 = 1, comm_exec = 1, ksymbol = 1, bpf_event = 1
# bpf_prog_info of id 13
# bpf_prog_info of id 14
# bpf_prog_info of id 15
# bpf_prog_info of id 16
# bpf_prog_info of id 17
# bpf_prog_info of id 18
# bpf_prog_info of id 21
# bpf_prog_info of id 22
# bpf_prog_info of id 41
# bpf_prog_info of id 42
# btf info of id 2
#
4) Check which programs got recorded:
# perf report | grep bpf_prog | head
0.16% exe bpf_prog_819967866022f1e1_sys_enter [k] bpf_prog_819967866022f1e1_sys_enter
0.14% exe bpf_prog_c1bd85c092d6e4aa_sys_exit [k] bpf_prog_c1bd85c092d6e4aa_sys_exit
0.08% fuse-overlayfs bpf_prog_819967866022f1e1_sys_enter [k] bpf_prog_819967866022f1e1_sys_enter
0.07% fuse-overlayfs bpf_prog_c1bd85c092d6e4aa_sys_exit [k] bpf_prog_c1bd85c092d6e4aa_sys_exit
0.01% clang-4.0 bpf_prog_c1bd85c092d6e4aa_sys_exit [k] bpf_prog_c1bd85c092d6e4aa_sys_exit
0.01% clang-4.0 bpf_prog_819967866022f1e1_sys_enter [k] bpf_prog_819967866022f1e1_sys_enter
0.00% clang bpf_prog_c1bd85c092d6e4aa_sys_exit [k] bpf_prog_c1bd85c092d6e4aa_sys_exit
0.00% runc bpf_prog_819967866022f1e1_sys_enter [k] bpf_prog_819967866022f1e1_sys_enter
0.00% clang bpf_prog_819967866022f1e1_sys_enter [k] bpf_prog_819967866022f1e1_sys_enter
0.00% sh bpf_prog_c1bd85c092d6e4aa_sys_exit [k] bpf_prog_c1bd85c092d6e4aa_sys_exit
#
This was with the default --sort order for 'perf report', which is:
--sort comm,dso,symbol
If we just look for the symbol, for instance:
# perf report --sort symbol | grep bpf_prog | head
0.26% [k] bpf_prog_819967866022f1e1_sys_enter - -
0.24% [k] bpf_prog_c1bd85c092d6e4aa_sys_exit - -
#
or the DSO:
# perf report --sort dso | grep bpf_prog | head
0.26% bpf_prog_819967866022f1e1_sys_enter
0.24% bpf_prog_c1bd85c092d6e4aa_sys_exit
#
We'll see the two BPF programs that augmented_raw_syscalls.o puts in
place, one attached to the raw_syscalls:sys_enter and another to the
raw_syscalls:sys_exit tracepoints, as expected.
Now we can finally do, from the command line, annotation for one of
those two symbols, with the original BPF program source code intermixed
with the disassembled JITed code:
# perf annotate --stdio2 bpf_prog_819967866022f1e1_sys_enter
Samples: 950 of event 'cycles:ppp', 4000 Hz, Event count (approx.): 553756947, [percent: local period]
bpf_prog_819967866022f1e1_sys_enter() bpf_prog_819967866022f1e1_sys_enter
Percent int sys_enter(struct syscall_enter_args *args)
53.41 push %rbp
0.63 mov %rsp,%rbp
0.31 sub $0x170,%rsp
1.93 sub $0x28,%rbp
7.02 mov %rbx,0x0(%rbp)
3.20 mov %r13,0x8(%rbp)
1.07 mov %r14,0x10(%rbp)
0.61 mov %r15,0x18(%rbp)
0.11 xor %eax,%eax
1.29 mov %rax,0x20(%rbp)
0.11 mov %rdi,%rbx
return bpf_get_current_pid_tgid();
2.02 → callq *ffffffffda6776d9
2.76 mov %eax,-0x148(%rbp)
mov %rbp,%rsi
int sys_enter(struct syscall_enter_args *args)
add $0xfffffffffffffeb8,%rsi
return bpf_map_lookup_elem(pids, &pid) != NULL;
movabs $0xffff975ac2607800,%rdi
1.26 → callq *ffffffffda6789e9
cmp $0x0,%rax
2.43 → je 0
add $0x38,%rax
0.21 xor %r13d,%r13d
if (pid_filter__has(&pids_filtered, getpid()))
0.81 cmp $0x0,%rax
→ jne 0
mov %rbp,%rdi
probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
2.22 add $0xfffffffffffffeb8,%rdi
0.11 mov $0x40,%esi
0.32 mov %rbx,%rdx
2.74 → callq *ffffffffda658409
syscall = bpf_map_lookup_elem(&syscalls, &augmented_args.args.syscall_nr);
0.22 mov %rbp,%rsi
1.69 add $0xfffffffffffffec0,%rsi
syscall = bpf_map_lookup_elem(&syscalls, &augmented_args.args.syscall_nr);
movabs $0xffff975bfcd36000,%rdi
add $0xd0,%rdi
0.21 mov 0x0(%rsi),%eax
0.93 cmp $0x200,%rax
→ jae 0
0.10 shl $0x3,%rax
0.11 add %rdi,%rax
0.11 → jmp 0
xor %eax,%eax
if (syscall == NULL || !syscall->enabled)
1.07 cmp $0x0,%rax
→ je 0
if (syscall == NULL || !syscall->enabled)
6.57 movzbq 0x0(%rax),%rdi
if (syscall == NULL || !syscall->enabled)
cmp $0x0,%rdi
0.95 → je 0
mov $0x40,%r8d
switch (augmented_args.args.syscall_nr) {
mov -0x140(%rbp),%rdi
switch (augmented_args.args.syscall_nr) {
cmp $0x2,%rdi
→ je 0
cmp $0x101,%rdi
→ je 0
cmp $0x15,%rdi
→ jne 0
case SYS_OPEN: filename_arg = (const void *)args->args[0];
mov 0x10(%rbx),%rdx
→ jmp 0
case SYS_OPENAT: filename_arg = (const void *)args->args[1];
mov 0x18(%rbx),%rdx
if (filename_arg != NULL) {
cmp $0x0,%rdx
→ je 0
xor %edi,%edi
augmented_args.filename.reserved = 0;
mov %edi,-0x104(%rbp)
augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
mov %rbp,%rdi
add $0xffffffffffffff00,%rdi
augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
mov $0x100,%esi
→ callq *ffffffffda658499
mov $0x148,%r8d
augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
mov %eax,-0x108(%rbp)
augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
mov %rax,%rdi
shl $0x20,%rdi
shr $0x20,%rdi
if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) {
cmp $0xff,%rdi
→ ja 0
len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size;
add $0x48,%rax
len &= sizeof(augmented_args.filename.value) - 1;
and $0xff,%rax
mov %rax,%r8
mov %rbp,%rcx
return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
add $0xfffffffffffffeb8,%rcx
mov %rbx,%rdi
movabs $0xffff975fbd72d800,%rsi
mov $0xffffffff,%edx
→ callq *ffffffffda658ad9
mov %rax,%r13
}
mov %r13,%rax
0.72 mov 0x0(%rbp),%rbx
mov 0x8(%rbp),%r13
1.16 mov 0x10(%rbp),%r14
0.10 mov 0x18(%rbp),%r15
0.42 add $0x28,%rbp
0.54 leaveq
0.54 ← retq
#
Please see 'man perf-config' for how to control what should be shown,
via the ~/.perfconfig [annotate] section; for instance, one can suppress
the source code and see just the disassembly, etc.
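For example, two of the [annotate] knobs (the values here are just an
illustration):
	# ~/.perfconfig
	[annotate]
		hide_src_code = true	# disassembly only, no source intermixed
		show_nr_jumps = true	# show the number of jumps to each target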
Alternatively, use the TUI by just running 'perf annotate', press
'/bpf_prog' to see the bpf symbols, press enter and do the interactive
annotation, which allows for dumping to a file after selecting the
various output tunables; for instance, the above without source code
intermixed, plus showing all the instruction offsets:
# perf annotate bpf_prog_819967866022f1e1_sys_enter
Then press: 's' to hide the source code + 'O' twice to show all
instruction offsets, then 'P' to print to the
bpf_prog_819967866022f1e1_sys_enter.annotation file, which will have:
# cat bpf_prog_819967866022f1e1_sys_enter.annotation
bpf_prog_819967866022f1e1_sys_enter() bpf_prog_819967866022f1e1_sys_enter
Event: cycles:ppp
53.41 0: push %rbp
0.63 1: mov %rsp,%rbp
0.31 4: sub $0x170,%rsp
1.93 b: sub $0x28,%rbp
7.02 f: mov %rbx,0x0(%rbp)
3.20 13: mov %r13,0x8(%rbp)
1.07 17: mov %r14,0x10(%rbp)
0.61 1b: mov %r15,0x18(%rbp)
0.11 1f: xor %eax,%eax
1.29 21: mov %rax,0x20(%rbp)
0.11 25: mov %rdi,%rbx
2.02 28: → callq *ffffffffda6776d9
2.76 2d: mov %eax,-0x148(%rbp)
33: mov %rbp,%rsi
36: add $0xfffffffffffffeb8,%rsi
3d: movabs $0xffff975ac2607800,%rdi
1.26 47: → callq *ffffffffda6789e9
4c: cmp $0x0,%rax
2.43 50: → je 0
52: add $0x38,%rax
0.21 56: xor %r13d,%r13d
0.81 59: cmp $0x0,%rax
5d: → jne 0
63: mov %rbp,%rdi
2.22 66: add $0xfffffffffffffeb8,%rdi
0.11 6d: mov $0x40,%esi
0.32 72: mov %rbx,%rdx
2.74 75: → callq *ffffffffda658409
0.22 7a: mov %rbp,%rsi
1.69 7d: add $0xfffffffffffffec0,%rsi
84: movabs $0xffff975bfcd36000,%rdi
8e: add $0xd0,%rdi
0.21 95: mov 0x0(%rsi),%eax
0.93 98: cmp $0x200,%rax
9f: → jae 0
0.10 a1: shl $0x3,%rax
0.11 a5: add %rdi,%rax
0.11 a8: → jmp 0
aa: xor %eax,%eax
1.07 ac: cmp $0x0,%rax
b0: → je 0
6.57 b6: movzbq 0x0(%rax),%rdi
bb: cmp $0x0,%rdi
0.95 bf: → je 0
c5: mov $0x40,%r8d
cb: mov -0x140(%rbp),%rdi
d2: cmp $0x2,%rdi
d6: → je 0
d8: cmp $0x101,%rdi
df: → je 0
e1: cmp $0x15,%rdi
e5: → jne 0
e7: mov 0x10(%rbx),%rdx
eb: → jmp 0
ed: mov 0x18(%rbx),%rdx
f1: cmp $0x0,%rdx
f5: → je 0
f7: xor %edi,%edi
f9: mov %edi,-0x104(%rbp)
ff: mov %rbp,%rdi
102: add $0xffffffffffffff00,%rdi
109: mov $0x100,%esi
10e: → callq *ffffffffda658499
113: mov $0x148,%r8d
119: mov %eax,-0x108(%rbp)
11f: mov %rax,%rdi
122: shl $0x20,%rdi
126: shr $0x20,%rdi
12a: cmp $0xff,%rdi
131: → ja 0
133: add $0x48,%rax
137: and $0xff,%rax
13d: mov %rax,%r8
140: mov %rbp,%rcx
143: add $0xfffffffffffffeb8,%rcx
14a: mov %rbx,%rdi
14d: movabs $0xffff975fbd72d800,%rsi
157: mov $0xffffffff,%edx
15c: → callq *ffffffffda658ad9
161: mov %rax,%r13
164: mov %r13,%rax
0.72 167: mov 0x0(%rbp),%rbx
16b: mov 0x8(%rbp),%r13
1.16 16f: mov 0x10(%rbp),%r14
0.10 173: mov 0x18(%rbp),%r15
0.42 177: add $0x28,%rbp
0.54 17b: leaveq
0.54 17c: ← retq
Another cool way to test all this is to simply use 'perf top', look for
those symbols, go there and press enter, and annotate it live :-)
Signed-off-by: Song Liu <songliubraving@fb.com>
Reviewed-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stanislav Fomichev <sdf@google.com>
Link: http://lkml.kernel.org/r/20190312053051.2690567-13-songliubraving@fb.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-03-12 08:30:48 +03:00
# include "bpf-event.h"
2021-10-11 11:20:30 +03:00
# include "bpf-utils.h"
perf annotate: Add branch stack / basic block
I wanted to know the hottest path through a function and figured the
branch-stack (LBR) information should be able to help out with that.
The below uses the branch-stack to create basic blocks and generate
statistics from them.
     from    to      branch_i
     * ----> *
             |
             | block
             v
             * ----> *
             from    to      branch_i+1
The blocks are broken down into non-overlapping ranges, while tracking
if the start of each range is an entry point and/or the end of a range
is a branch.
Each block iterates all ranges it covers (while splitting where required
to exactly match the block) and increments the 'coverage' count.
For the range including the branch we increment the taken counter, as
well as the pred counter if flags.predicted.
Using these numbers we can find if an instruction:
- had coverage; given by:
br->coverage / br->sym->max_coverage
This metric ensures each symbol has a 100% spot, which reflects the
observation that each symbol must have a most covered/hottest
block.
- is a branch target: br->is_target && br->start == addr
- for targets, how much of a branch's coverages comes from it:
target->entry / branch->coverage
- is a branch: br->is_branch && br->end == addr
- for branches, how often it was taken:
br->taken / br->coverage
after all, all execution that didn't take the branch would have
incremented the coverage and continued onward to a later branch.
- for branches, how often it was predicted:
br->pred / br->taken
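A back-of-the-envelope sketch of those last two ratios, over a hypothetical
per-range counter struct whose field names merely mirror the description
above:
	struct br_range {	/* hypothetical, for illustration only */
		u64 coverage;	/* executions that passed through the range */
		u64 taken;	/* times the ending branch was taken */
		u64 pred;	/* times that taken branch was predicted */
	};

	static double taken_ratio(const struct br_range *br)
	{
		return br->coverage ? (double)br->taken / br->coverage : 0;
	}

	static double pred_ratio(const struct br_range *br)
	{
		return br->taken ? (double)br->pred / br->taken : 0;
	}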
The coverage percentage is used to color the address and asm sections;
for low (<1%) coverage we use NORMAL (uncolored), indicating that these
instructions are not 'important'. For high coverage (>75%) we color the
address RED.
For each branch, we add an asm comment after the instruction with
information on how often it was taken and predicted.
Output looks like (sans color, which does lose a lot of the
information :/)
$ perf record --branch-filter u,any -e cycles:p ./branches 27
$ perf annotate branches
Percent | Source code & Disassembly of branches for cycles:pu (217 samples)
---------------------------------------------------------------------------------
: branches():
0.00 : 40057a: push %rbp
0.00 : 40057b: mov %rsp,%rbp
0.00 : 40057e: sub $0x20,%rsp
0.00 : 400582: mov %rdi,-0x18(%rbp)
0.00 : 400586: mov %rsi,-0x20(%rbp)
0.00 : 40058a: mov -0x18(%rbp),%rax
0.00 : 40058e: mov %rax,-0x10(%rbp)
0.00 : 400592: movq $0x0,-0x8(%rbp)
0.00 : 40059a: jmpq 400656 <branches+0xdc>
1.84 : 40059f: mov -0x10(%rbp),%rax # +100.00%
3.23 : 4005a3: and $0x1,%eax
1.84 : 4005a6: test %rax,%rax
0.00 : 4005a9: je 4005bf <branches+0x45> # -54.50% (p:42.00%)
0.46 : 4005ab: mov 0x200bbe(%rip),%rax # 601170 <acc>
12.90 : 4005b2: add $0x1,%rax
2.30 : 4005b6: mov %rax,0x200bb3(%rip) # 601170 <acc>
0.46 : 4005bd: jmp 4005d1 <branches+0x57> # -100.00% (p:100.00%)
0.92 : 4005bf: mov 0x200baa(%rip),%rax # 601170 <acc> # +49.54%
13.82 : 4005c6: sub $0x1,%rax
0.46 : 4005ca: mov %rax,0x200b9f(%rip) # 601170 <acc>
2.30 : 4005d1: mov -0x10(%rbp),%rax # +50.46%
0.46 : 4005d5: mov %rax,%rdi
0.46 : 4005d8: callq 400526 <lfsr> # -100.00% (p:100.00%)
0.00 : 4005dd: mov %rax,-0x10(%rbp) # +100.00%
0.92 : 4005e1: mov -0x18(%rbp),%rax
0.00 : 4005e5: and $0x1,%eax
0.00 : 4005e8: test %rax,%rax
0.00 : 4005eb: je 4005ff <branches+0x85> # -100.00% (p:100.00%)
0.00 : 4005ed: mov 0x200b7c(%rip),%rax # 601170 <acc>
0.00 : 4005f4: shr $0x2,%rax
0.00 : 4005f8: mov %rax,0x200b71(%rip) # 601170 <acc>
0.00 : 4005ff: mov -0x10(%rbp),%rax # +100.00%
7.37 : 400603: and $0x1,%eax
3.69 : 400606: test %rax,%rax
0.00 : 400609: jne 400612 <branches+0x98> # -59.25% (p:42.99%)
1.84 : 40060b: mov $0x1,%eax
14.29 : 400610: jmp 400617 <branches+0x9d> # -100.00% (p:100.00%)
1.38 : 400612: mov $0x0,%eax # +57.65%
10.14 : 400617: test %al,%al # +42.35%
0.00 : 400619: je 40062f <branches+0xb5> # -57.65% (p:100.00%)
0.46 : 40061b: mov 0x200b4e(%rip),%rax # 601170 <acc>
2.76 : 400622: sub $0x1,%rax
0.00 : 400626: mov %rax,0x200b43(%rip) # 601170 <acc>
0.46 : 40062d: jmp 400641 <branches+0xc7> # -100.00% (p:100.00%)
0.92 : 40062f: mov 0x200b3a(%rip),%rax # 601170 <acc> # +56.13%
2.30 : 400636: add $0x1,%rax
0.92 : 40063a: mov %rax,0x200b2f(%rip) # 601170 <acc>
0.92 : 400641: mov -0x10(%rbp),%rax # +43.87%
2.30 : 400645: mov %rax,%rdi
0.00 : 400648: callq 400526 <lfsr> # -100.00% (p:100.00%)
0.00 : 40064d: mov %rax,-0x10(%rbp) # +100.00%
1.84 : 400651: addq $0x1,-0x8(%rbp)
0.92 : 400656: mov -0x8(%rbp),%rax
5.07 : 40065a: cmp -0x20(%rbp),%rax
0.00 : 40065e: jb 40059f <branches+0x25> # -100.00% (p:100.00%)
0.00 : 400664: nop
0.00 : 400665: leaveq
0.00 : 400666: retq
(Note: the --branch-filter u,any was used to avoid spurious target and
branch points due to interrupts/faults; they show up as very small -/+
annotations on 'weird' locations.)
Committer note:
Please take a look at:
http://vger.kernel.org/~acme/perf/annotate_basic_blocks.png
To see the colors.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: David Carrillo-Cisneros <davidcc@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
[ Moved sym->max_coverage to 'struct annotate', aka symbol__annotate(sym) ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-09-05 22:08:12 +03:00
# include "block-range.h"
2017-04-17 22:51:59 +03:00
# include "string2.h"
2019-09-24 22:07:59 +03:00
# include "util/event.h"
2016-11-16 21:39:50 +03:00
# include "arch/common.h"
2014-11-13 05:05:26 +03:00
#include <regex.h>
2011-02-08 18:27:39 +03:00
#include <pthread.h>
2012-10-28 01:18:29 +04:00
#include <linux/bitops.h>
2017-04-17 17:39:06 +03:00
#include <linux/kernel.h>
2019-06-26 18:13:13 +03:00
#include <linux/string.h>
2019-08-22 23:10:08 +03:00
#include <subcmd/parse-options.h>
2019-10-10 21:36:46 +03:00
#include <subcmd/run-command.h>
2011-02-04 14:45:46 +03:00
2018-03-16 01:12:39 +03:00
/* FIXME: For the HE_COLORSET */
# include "ui/browser.h"
/*
 * FIXME: Using the same values as slang.h,
* but that header may not be available everywhere
*/
2018-03-16 05:14:51 +03:00
#define LARROW_CHAR	((unsigned char)',')
#define RARROW_CHAR	((unsigned char)'+')
#define DARROW_CHAR	((unsigned char)'.')
#define UARROW_CHAR	((unsigned char)'-')
2018-03-16 01:12:39 +03:00
tools perf: Move from sane_ctype.h obtained from git to Linux's original
We got the sane_ctype.h headers from git and kept using them so far, but
since that code originally came from the kernel sources into the git
sources, perhaps it's better to just use the one in the kernel, so that
we can leverage tools/perf/check_headers.sh to be notified when our copy
gets out of sync, i.e. when fixes or goodies are added to the code we've
copied.
This will help with things like tools/lib/string.c, where we want to have
more things in common with the kernel, such as strim(), skip_spaces(),
etc., so that we can go on removing the things we have in tools/perf/util/
and instead use the code in the kernel, indirectly removing things like
EXPORT_SYMBOL(), etc., and getting notified when fixes and improvements
are made to the original code.
Hopefully this should also help reduce the difference between the code
hosted in tools/ and that in the kernel proper.
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lkml.kernel.org/n/tip-7k9868l713wqtgo01xxygn12@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-06-25 23:27:31 +03:00
#include <linux/ctype.h>
2017-04-17 22:10:49 +03:00
2018-03-16 20:33:38 +03:00
struct annotation_options annotation__default_options = {
	.use_offset	= true,
	.jump_arrows	= true,
2018-05-28 17:42:59 +03:00
	.annotate_src	= true,
2018-04-11 16:30:03 +03:00
	.offset_level	= ANNOTATION__OFFSET_JUMP_TARGETS,
2018-08-04 16:05:18 +03:00
	.percent_type	= PERCENT_PERIOD_LOCAL,
2018-03-16 20:33:38 +03:00
};
2014-11-13 05:05:26 +03:00
static regex_t	 file_lineno;
2011-09-16 01:31:41 +04:00
2016-11-24 17:16:06 +03:00
static struct ins_ops *ins__find(struct arch *arch, const char *name);
2016-11-24 17:37:08 +03:00
static void ins__sort(struct arch *arch);
2016-11-24 17:16:06 +03:00
static int disasm_line__parse(char *line, const char **namep, char **rawp);
2012-05-12 20:15:34 +04:00
2016-11-16 21:39:50 +03:00
struct arch {
	const char	*name;
2016-11-17 18:31:51 +03:00
	struct ins	*instructions;
	size_t		nr_instructions;
2016-11-24 17:37:08 +03:00
	size_t		nr_instructions_allocated;
	struct ins_ops	*(*associate_instruction_ops)(struct arch *arch, const char *name);
2016-11-17 18:31:51 +03:00
	bool		sorted_instructions;
2016-11-18 18:34:26 +03:00
	bool		initialized;
	void		*priv;
perf annotate: Check for fused instructions
Macro fusion merges two instructions into a single micro-op. The Intel
Core platform performs this hardware optimization under limited
circumstances.
For example, CMP + JCC can be "fused" and executed/retired together.
With sampling, this can result in the sample sometimes landing on the
JCC and sometimes on the CMP, so the fused instruction pair should be
considered together.
On Nehalem, fused instruction pairs:
cmp/test + jcc.
On other newer CPUs:
cmp/test/add/sub/and/inc/dec + jcc.
This patch adds an x86-specific function which checks if 2 instructions
are in a "fused" pair. For non-x86 arch, the function is just NULL.
Changelog:
v4: Move the CPU model checking to symbol__disassemble and save the CPU
family/model in arch structure.
This avoids re-checking every time a jump arrow is printed.
v3: Add checking for Nehalem (CMP, TEST). For other newer Intel CPUs
just check it by default (CMP, TEST, ADD, SUB, AND, INC, DEC).
v2: Remove the original weak function. Arnaldo points out that doing it
as a weak function that will be overridden by the host arch doesn't
work. So now it's implemented as an arch-specific function.
Committer fix:
Do not access evsel->evlist->env->cpuid, as ->env can be null; introduce
perf_evsel__env_cpuid(), just like perf_evsel__env_arch(), also used in
this function call.
The original patch was segfaulting 'perf top' + annotation.
But this essentially disables the fused-instructions augmentation in
'perf top'; the right thing is to get the cpuid from the running kernel,
left for a later patch, though.
Signed-off-by: Yao Jin <yao.jin@linux.intel.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1499403995-19857-2-git-send-email-yao.jin@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
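A condensed sketch of the check described above; the mnemonic lists come
straight from this message, but the exact upstream matching logic differs
in detail:
	/* Can 'ins1' fuse with a following conditional jump 'ins2' on this CPU? */
	static bool ins_is_fused_sketch(struct arch *arch, const char *ins1,
					const char *ins2)
	{
		if (ins2[0] != 'j')	/* the second insn must be a jcc */
			return false;

		if (arch->family == 6 && arch->model == 0x1e)	/* a Nehalem model */
			return !strncmp(ins1, "cmp", 3) || !strncmp(ins1, "test", 4);

		/* newer Intel cores: cmp/test/add/sub/and/inc/dec + jcc */
		return !strncmp(ins1, "cmp", 3) || !strncmp(ins1, "test", 4) ||
		       !strncmp(ins1, "add", 3) || !strncmp(ins1, "sub", 3) ||
		       !strncmp(ins1, "and", 3) || !strncmp(ins1, "inc", 3) ||
		       !strncmp(ins1, "dec", 3);
	}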
2017-07-07 08:06:34 +03:00
	unsigned int	model;
	unsigned int	family;
2017-10-11 18:01:24 +03:00
	int		(*init)(struct arch *arch, char *cpuid);
2017-07-07 08:06:34 +03:00
	bool		(*ins_is_fused)(struct arch *arch, const char *ins1,
					const char *ins2);
2016-11-16 21:39:50 +03:00
struct {
		char comment_char;
2016-11-16 21:50:38 +03:00
		char skip_functions_char;
2016-11-16 21:39:50 +03:00
	} objdump;
};
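/*
 * Note: objdump.comment_char is the character this arch's objdump uses to
 * introduce trailing comments in operands (e.g. '#' on x86), so the operand
 * parser can split the raw text from the comment.
 */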
2016-11-17 18:31:51 +03:00
static struct ins_ops call_ops;
static struct ins_ops dec_ops;
static struct ins_ops jump_ops;
static struct ins_ops mov_ops;
static struct ins_ops nop_ops;
static struct ins_ops lock_ops;
static struct ins_ops ret_ops;
2016-11-24 17:37:08 +03:00
static int arch__grow_instructions(struct arch *arch)
{
	struct ins *new_instructions;
	size_t new_nr_allocated;

	if (arch->nr_instructions_allocated == 0 && arch->instructions)
		goto grow_from_non_allocated_table;

	new_nr_allocated = arch->nr_instructions_allocated + 128;
	new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins));
	if (new_instructions == NULL)
		return -1;

out_update_instructions:
	arch->instructions = new_instructions;
	arch->nr_instructions_allocated = new_nr_allocated;
	return 0;

grow_from_non_allocated_table:
	new_nr_allocated = arch->nr_instructions + 128;
	new_instructions = calloc(new_nr_allocated, sizeof(struct ins));
	if (new_instructions == NULL)
		return -1;

	/* copy whole entries, not just nr_instructions bytes */
	memcpy(new_instructions, arch->instructions, arch->nr_instructions * sizeof(struct ins));
	goto out_update_instructions;
}
2016-11-18 22:54:10 +03:00
static int arch__associate_ins_ops(struct arch *arch, const char *name, struct ins_ops *ops)
2016-11-24 17:37:08 +03:00
{
	struct ins *ins;

	if (arch->nr_instructions == arch->nr_instructions_allocated &&
	    arch__grow_instructions(arch))
		return -1;

	ins = &arch->instructions[arch->nr_instructions];
	ins->name = strdup(name);
	if (!ins->name)
		return -1;

	ins->ops  = ops;
	arch->nr_instructions++;

	ins__sort(arch);
	return 0;
}
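/*
 * Illustrative usage (not from this file; "my.cond.br" and myarch are made
 * up): an arch ->init hook could register a mnemonic at runtime like this:
 *
 *	static int myarch__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
 *	{
 *		arch->initialized = true;
 *		return arch__associate_ins_ops(arch, "my.cond.br", &jump_ops);
 *	}
 */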
2018-12-04 20:51:18 +03:00
# include "arch/arc/annotate/instructions.c"
2016-11-17 18:31:51 +03:00
# include "arch/arm/annotate/instructions.c"
perf annotate: AArch64 support
This is a regex-converted version of the original:
https://lkml.org/lkml/2016/5/19/461
Add basic support to recognise AArch64 assembly. This allows perf to
identify AArch64 instructions that branch to other parts within the
same function, thereby properly annotating them.
Rebased onto new cross-arch annotation bits:
https://lkml.org/lkml/2016/11/25/546
Sample output:
security_file_permission vmlinux
5.80 │ ← ret ▒
│70: ldr w0, [x21,#68] ▒
4.44 │ ↓ tbnz d0 ▒
│ mov w0, #0x24 // #36 ▒
1.37 │ ands w0, w22, w0 ▒
│ ↑ b.eq 60 ▒
1.37 │ ↓ tbnz e4 ▒
│ mov w19, #0x20000 // #131072 ▒
1.02 │ ↓ tbz ec ▒
│90:┌─→ldr x3, [x21,#24] ▒
1.37 │ │ add x21, x21, #0x10 ▒
│ │ mov w2, w19 ▒
1.02 │ │ mov x0, x21 ▒
│ │ mov x1, x3 ▒
1.71 │ │ ldr x20, [x3,#48] ▒
│ │→ bl __fsnotify_parent ▒
0.68 │ │↑ cbnz 60 ▒
│ │ mov x2, x21 ▒
1.37 │ │ mov w1, w19 ▒
│ │ mov x0, x20 ▒
0.68 │ │ mov w5, #0x0 // #0 ▒
│ │ mov x4, #0x0 // #0 ▒
1.71 │ │ mov w3, #0x1 // #1 ▒
│ │→ bl fsnotify ▒
1.37 │ │↑ b 60 ▒
│d0:│ mov w0, #0x0 // #0 ▒
│ │ ldp x19, x20, [sp,#16] ▒
│ │ ldp x21, x22, [sp,#32] ▒
│ │ ldp x29, x30, [sp],#48 ▒
│ │← ret ▒
│e4:│ mov w19, #0x10000 // #65536 ▒
│ └──b 90 ◆
│ec: brk #0x800 ▒
Press 'h' for help on key bindings
Signed-off-by: Kim Phillips <kim.phillips@arm.com>
Signed-off-by: Chris Ryder <chris.ryder@arm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Pawel Moll <pawel.moll@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Link: http://lkml.kernel.org/r/20161130092344.012e18e3e623bea395162f95@arm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
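The "regex converted" part means mnemonics are classified with POSIX
regexes rather than a fixed table; a toy version of the idea (the pattern
below is illustrative, not the one the patch ships):
	#include <regex.h>
	#include <stdbool.h>

	static regex_t arm64_jump_insn;
	static bool arm64_jump_insn_compiled;

	/* Toy classifier: does this mnemonic look like an AArch64 branch? */
	static bool arm64_is_jump_sketch(const char *name)
	{
		if (!arm64_jump_insn_compiled) {
			/* b, b.cond, cbz/cbnz, tbz/tbnz */
			if (regcomp(&arm64_jump_insn,
				    "^(b|b\\.[a-z]{2}|cbn?z|tbn?z)$", REG_EXTENDED))
				return false;
			arm64_jump_insn_compiled = true;
		}
		return regexec(&arm64_jump_insn, name, 0, NULL, 0) == 0;
	}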
2016-11-30 18:23:44 +03:00
# include "arch/arm64/annotate/instructions.c"
perf annotate: Add csky support
This patch adds basic arch initialization and instruction-association
support for the csky CPU architecture.
E.g.:
$ perf annotate --stdio2
Samples: 161 of event 'cpu-clock:pppH', 4000 Hz, Event count (approx.):
40250000, [percent: local period]
test_4() /usr/lib/perf-test/callchain_test
Percent
Disassembly of section .text:
00008420 <test_4>:
test_4():
subi sp, sp, 4
st.w r8, (sp, 0x0)
mov r8, sp
subi sp, sp, 8
subi r3, r8, 4
movi r2, 0
st.w r2, (r3, 0x0)
↓ br 2e
100.00 14: subi r3, r8, 4
ld.w r2, (r3, 0x0)
subi r3, r8, 8
st.w r2, (r3, 0x0)
subi r3, r8, 4
ld.w r3, (r3, 0x0)
addi r2, r3, 1
subi r3, r8, 4
st.w r2, (r3, 0x0)
2e: subi r3, r8, 4
ld.w r2, (r3, 0x0)
lrw r3, 0x98967f // 8598 <main+0x28>
cmplt r3, r2
↑ bf 14
mov r0, r0
mov r0, r0
mov sp, r8
ld.w r8, (sp, 0x0)
addi sp, sp, 4
← rts
Signed-off-by: Mao Han <han_mao@c-sky.com>
Acked-by: Guo Ren <guoren@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-csky@vger.kernel.org
Link: http://lkml.kernel.org/r/d874d7782d9acdad5d98f2f5c4a6fb26fbe41c5d.1561531557.git.han_mao@c-sky.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-06-26 09:52:19 +03:00
# include "arch/csky/annotate/instructions.c"
perf annotate mips: Add perf arch instructions annotate handlers
Support the MIPS architecture using the ins_ops association method. With
this patch, perf-annotate can work well on MIPS.
Testing it with a perf.data file collected on a mips machine:
$./perf annotate -i perf.data
: Disassembly of section .text:
:
: 00000000000be6a0 <get_next_seq>:
: get_next_seq():
0.00 : be6a0: lw v0,0(a0)
0.00 : be6a4: daddiu sp,sp,-128
0.00 : be6a8: ld a7,72(a0)
0.00 : be6ac: gssq s5,s4,80(sp)
0.00 : be6b0: gssq s1,s0,48(sp)
0.00 : be6b4: gssq s8,gp,112(sp)
0.00 : be6b8: gssq s7,s6,96(sp)
0.00 : be6bc: gssq s3,s2,64(sp)
0.00 : be6c0: sd a3,0(sp)
0.00 : be6c4: move s0,a0
0.00 : be6c8: sd v0,32(sp)
0.00 : be6cc: sd a5,8(sp)
0.00 : be6d0: sd zero,8(a0)
0.00 : be6d4: sd a6,16(sp)
0.00 : be6d8: ld s2,48(a0)
8.53 : be6dc: ld s1,40(a0)
9.42 : be6e0: ld v1,32(a0)
0.00 : be6e4: nop
0.00 : be6e8: ld s4,24(a0)
0.00 : be6ec: ld s5,16(a0)
0.00 : be6f0: sd a7,40(sp)
10.11 : be6f4: ld s6,64(a0)
...
The original patch link:
https://lore.kernel.org/patchwork/patch/1180480/
Signed-off-by: Dengcheng Zhu <dzhu@wavecomp.com>
Cc: Dengcheng Zhu <dzhu@wavecomp.com>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Xuefeng Li <lixuefeng@loongson.cn>
Cc: linux-mips@vger.kernel.org
[ fanpeng@loongson.cn: Add missing "bgtzl", "bltzl", "bgezl", "blezl", "beql" and "bnel" for pre-R6 processors ]
Signed-off-by: Peng Fan <fanpeng@loongson.cn>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2020-10-19 10:21:24 +03:00
# include "arch/mips/annotate/instructions.c"
2016-11-17 18:31:51 +03:00
# include "arch/x86/annotate/instructions.c"
perf annotate: Initial PowerPC support
Support the PowerPC architecture using the ins_ops association
method.
Committer notes:
Testing it with a perf.data file collected on a PowerPC machine and
cross-annotated on a x86_64 workstation, using the associated vmlinux
file:
$ perf report -i perf.data.f22vm.powerdev --vmlinux vmlinux.powerpc
.ktime_get vmlinux.powerpc
│ clrldi r9,r28,63
8.57 │ ┌──bne e0 <- TUI cursor positioned here
│54:│ lwsync
2.86 │ │ std r2,40(r1)
│ │ ld r9,144(r31)
│ │ ld r3,136(r31)
│ │ ld r30,184(r31)
│ │ ld r10,0(r9)
│ │ mtctr r10
│ │ ld r2,8(r9)
8.57 │ │→ bctrl
│ │ ld r2,40(r1)
│ │ ld r10,160(r31)
│ │ ld r5,152(r31)
│ │ lwz r7,168(r31)
│ │ ld r9,176(r31)
8.57 │ │ lwz r6,172(r31)
│ │ lwsync
2.86 │ │ lwz r8,128(r31)
│ │ cmpw cr7,r8,r28
2.86 │ │↑ bne 48
│ │ subf r10,r10,r3
│ │ mr r3,r29
│ │ and r10,r10,r5
2.86 │ │ mulld r10,r10,r7
│ │ add r9,r10,r9
│ │ srd r9,r9,r6
│ │ add r9,r9,r30
│ │ std r9,0(r29)
│ │ addi r1,r1,144
│ │ ld r0,16(r1)
│ │ ld r28,-32(r1)
│ │ ld r29,-24(r1)
│ │ ld r30,-16(r1)
│ │ mtlr r0
│ │ ld r31,-8(r1)
│ │← blr
5.71 │e0:└─→mr r1,r1
11.43 │ mr r2,r2
11.43 │ lwz r28,128(r31)
Press 'h' for help on key bindings
$ perf report -i perf.data.f22vm.powerdev --header-only
# ========
# captured on: Thu Nov 24 12:40:38 2016
# hostname : pdev-f22-qemu
# os release : 4.4.10-200.fc22.ppc64
# perf version : 4.9.rc1.g6298ce
# arch : ppc64
# nrcpus online : 48
# nrcpus avail : 48
# cpudesc : POWER7 (architected), altivec supported
# cpuid : 74,513
# total memory : 4158976 kB
# cmdline : /home/ravi/Workspace/linux/tools/perf/perf record -a
# event : name = cycles:ppp, , size = 112, { sample_period, sample_freq } = 4000, sample_type = IP|TID|TIME|CPU|PERIOD, disabled = 1, inherit = 1, mmap = 1, comm = 1, freq = 1, task = 1, precise_ip = 3, sample_id_all = 1, exclude_guest = 1, mmap2 = 1, comm_exec = 1
# HEADER_CPU_TOPOLOGY info available, use -I to display
# HEADER_NUMA_TOPOLOGY info available, use -I to display
# pmu mappings: cpu = 4, software = 1, tracepoint = 2, breakpoint = 5
# missing features: HEADER_TRACING_DATA HEADER_BRANCH_STACK HEADER_GROUP_DESC HEADER_AUXTRACE HEADER_STAT HEADER_CACHE
# ========
#
$
Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Kim Phillips <kim.phillips@arm.com>
Link: http://lkml.kernel.org/n/tip-tbjnp40ddoxxl474uvhwi6g4@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-11-23 19:03:46 +03:00
# include "arch/powerpc/annotate/instructions.c"
perf annotate: Add riscv64 support
This patch adds basic arch initialization and instruction-association
support for the riscv64 CPU architecture.
Example output:
$ perf annotate --stdio2
Samples: 122K of event 'task-clock:u', 4000 Hz, Event count (approx.): 30637250000, [percent: local period]
strcmp() /usr/lib64/libc-2.32.so
Percent
Disassembly of section .text:
0000000000069a30 <strcmp>:
__GI_strcmp():
const unsigned char *s2 = (const unsigned char *) p2;
unsigned char c1, c2;
do
{
c1 = (unsigned char) *s1++;
37.30 lbu a5,0(a0)
c2 = (unsigned char) *s2++;
1.23 addi a1,a1,1
c1 = (unsigned char) *s1++;
18.68 addi a0,a0,1
c2 = (unsigned char) *s2++;
1.37 lbu a4,-1(a1)
if (c1 == '\0')
18.71 ↓ beqz a5,18
return c1 - c2;
}
Signed-off-by: William Cohen <wcohen@redhat.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-riscv@lists.infradead.org
Link: http://lore.kernel.org/lkml/20210927005115.610264-1-wcohen@redhat.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2021-09-27 03:51:15 +03:00
# include "arch/riscv64/annotate/instructions.c"
2017-04-06 10:51:52 +03:00
# include "arch/s390/annotate/instructions.c"
perf annotate: Add Sparc support
E.g.:
$ perf annotate --stdio2
Samples: 7K of event 'cycles:ppp', 4000 Hz, Event count (approx.): 3086733887
__gettimeofday /lib32/libc-2.27.so [Percent: local period]
Percent│
│
│
│ Disassembly of section .text:
│
│ 000a6fa0 <__gettimeofday@@GLIBC_2.0>:
0.47 │ save %sp, -96, %sp
0.73 │ sethi %hi(0xe9000), %l7
│ → call __frame_state_for@@GLIBC_2.0+0x480
0.30 │ add %l7, 0x58, %l7 ! e9058 <nftw64@@GLIBC_2.3.3+0x818>
1.33 │ mov %i0, %o0
│ mov %i1, %o1
0.43 │ mov 0x74, %g1
│ ta 0x10
88.92 │ ↓ bcc 30
2.95 │ clr %g1
│ neg %o0
│ mov 1, %g1
0.31 │30: cmp %g1, 0
│ bne,pn %icc, a6fe4 <__gettimeofday@@GLIBC_2.0+0x44>
│ mov %o0, %i0
1.96 │ ← return %i7 + 8
2.62 │ nop
│ sethi %hi(0), %g1
│ neg %o0, %g2
│ add %g1, 0x160, %g1
│ ld [ %l7 + %g1 ], %g1
│ st %g2, [ %g7 + %g1 ]
│ ← return %i7 + 8
│ mov -1, %o0
Signed-off-by: David S. Miller <davem@davemloft.net>
Link: http://lkml.kernel.org/r/20181016.205555.1070918198627611771.davem@davemloft.net
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-10-17 06:55:55 +03:00
# include "arch/sparc/annotate/instructions.c"
2016-11-17 18:31:51 +03:00
2016-11-16 21:39:50 +03:00
static struct arch architectures[] = {
2018-12-04 20:51:18 +03:00
	{
		.name = "arc",
		.init = arc__annotate_init,
	},
2016-11-16 21:39:50 +03:00
	{
		.name = "arm",
2016-11-18 22:54:10 +03:00
		.init = arm__annotate_init,
2016-11-16 21:39:50 +03:00
	},
2016-11-30 18:23:44 +03:00
	{
		.name = "arm64",
		.init = arm64__annotate_init,
	},
2019-06-26 09:52:19 +03:00
	{
		.name = "csky",
		.init = csky__annotate_init,
	},
2020-10-19 10:21:24 +03:00
	{
		.name = "mips",
		.init = mips__annotate_init,
		.objdump = {
			.comment_char = '#',
		},
	},
2016-11-16 21:39:50 +03:00
	{
		.name = "x86",
2017-10-11 18:01:24 +03:00
		.init = x86__annotate_init,
2016-11-17 18:31:51 +03:00
		.instructions = x86__instructions,
		.nr_instructions = ARRAY_SIZE(x86__instructions),
2016-11-16 21:39:50 +03:00
		.objdump = {
			.comment_char = '#',
		},
	},
2016-11-23 19:03:46 +03:00
	{
		.name = "powerpc",
		.init = powerpc__annotate_init,
	},
2021-09-27 03:51:15 +03:00
	{
		.name = "riscv64",
		.init = riscv64__annotate_init,
	},
2017-04-06 10:51:51 +03:00
	{
		.name = "s390",
2017-04-06 10:51:52 +03:00
		.init = s390__annotate_init,
2017-04-06 10:51:51 +03:00
		.objdump = {
			.comment_char = '#',
		},
	},
2018-10-17 06:55:55 +03:00
	{
		.name = "sparc",
		.init = sparc__annotate_init,
		.objdump = {
			.comment_char = '#',
		},
	},
2016-11-16 21:39:50 +03:00
};
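/*
 * Lookups over this table are by ->name; a minimal linear-scan sketch
 * (the helper name is assumed, the real lookup may differ):
 *
 *	static struct arch *arch__find_sketch(const char *name)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < ARRAY_SIZE(architectures); i++) {
 *			if (!strcmp(architectures[i].name, name))
 *				return &architectures[i];
 *		}
 *		return NULL;
 *	}
 */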
2012-05-12 20:26:20 +04:00
static void ins__delete(struct ins_operands *ops)
{
2015-03-05 21:27:28 +03:00
	if (ops == NULL)
		return;
2013-12-27 23:55:14 +04:00
	zfree(&ops->source.raw);
	zfree(&ops->source.name);
	zfree(&ops->target.raw);
	zfree(&ops->target.name);
2012-05-12 20:26:20 +04:00
}
2012-05-08 01:54:16 +04:00
static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
perf annotate: Calculate the max instruction name, align column to that
We were hardcoding '6' as the max instruction name length, and we have
lots of instructions longer than that; see the diff between two
'P'-printed TUI annotations of a libc function that uses instructions
with long names, such as 'vpmovmskb' with its 9 chars:
--- __strcmp_avx2.annotation.before 2019-03-06 16:31:39.368020425 -0300
+++ __strcmp_avx2.annotation 2019-03-06 16:32:12.079450508 -0300
@@ -2,284 +2,284 @@
Event: cycles:ppp
Percent endbr64
- 0.10 mov %edi,%eax
+ 0.10 mov %edi,%eax
- xor %edx,%edx
+ xor %edx,%edx
- 3.54 vpxor %ymm7,%ymm7,%ymm7
+ 3.54 vpxor %ymm7,%ymm7,%ymm7
- or %esi,%eax
+ or %esi,%eax
- and $0xfff,%eax
+ and $0xfff,%eax
- cmp $0xf80,%eax
+ cmp $0xf80,%eax
- ↓ jg 370
+ ↓ jg 370
- 27.07 vmovdqu (%rdi),%ymm1
+ 27.07 vmovdqu (%rdi),%ymm1
- 7.97 vpcmpeqb (%rsi),%ymm1,%ymm0
+ 7.97 vpcmpeqb (%rsi),%ymm1,%ymm0
- 2.15 vpminub %ymm1,%ymm0,%ymm0
+ 2.15 vpminub %ymm1,%ymm0,%ymm0
- 4.09 vpcmpeqb %ymm7,%ymm0,%ymm0
+ 4.09 vpcmpeqb %ymm7,%ymm0,%ymm0
- 0.43 vpmovmskb %ymm0,%ecx
+ 0.43 vpmovmskb %ymm0,%ecx
- 1.53 test %ecx,%ecx
+ 1.53 test %ecx,%ecx
- ↓ je b0
+ ↓ je b0
- 5.26 tzcnt %ecx,%edx
+ 5.26 tzcnt %ecx,%edx
- 18.40 movzbl (%rdi,%rdx,1),%eax
+ 18.40 movzbl (%rdi,%rdx,1),%eax
- 7.09 movzbl (%rsi,%rdx,1),%edx
+ 7.09 movzbl (%rsi,%rdx,1),%edx
- 3.34 sub %edx,%eax
+ 3.34 sub %edx,%eax
2.37 vzeroupper
← retq
nop
- 50: tzcnt %ecx,%edx
+ 50: tzcnt %ecx,%edx
- movzbl 0x20(%rdi,%rdx,1),%eax
+ movzbl 0x20(%rdi,%rdx,1),%eax
- movzbl 0x20(%rsi,%rdx,1),%edx
+ movzbl 0x20(%rsi,%rdx,1),%edx
- sub %edx,%eax
+ sub %edx,%eax
vzeroupper
← retq
- data16 nopw %cs:0x0(%rax,%rax,1)
+ data16 nopw %cs:0x0(%rax,%rax,1)
Reported-by: Travis Downs <travis.downs@gmail.com>
LPU-Reference: CAOBGo4z1KfmWeOm6Et0cnX5Z6DWsG2PQbAvRn1MhVPJmXHrc5g@mail.gmail.com
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lkml.kernel.org/n/tip-89wsdd9h9g6bvq52sgp6d0u4@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
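Conceptually the fix is a max() over the parsed lines, with the result fed
to the "%-*s" format in the printers below; a sketch with assumed list and
field names:
	/* Widest ins.name among the parsed lines (list/field names assumed). */
	static int max_ins_name_sketch(struct list_head *lines)
	{
		struct disasm_line *dl;
		int width = 0;

		list_for_each_entry(dl, lines, al.node) {
			if (dl->ins.name && (int)strlen(dl->ins.name) > width)
				width = (int)strlen(dl->ins.name);
		}
		return width;
	}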
2019-03-06 22:40:15 +03:00
			      struct ins_operands *ops, int max_ins_name)
2012-05-08 01:54:16 +04:00
{
2019-03-06 22:40:15 +03:00
	return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->raw);
2012-05-08 01:54:16 +04:00
}
int ins__scnprintf(struct ins *ins, char *bf, size_t size,
2019-03-06 22:40:15 +03:00
		   struct ins_operands *ops, int max_ins_name)
2012-05-08 01:54:16 +04:00
{
	if (ins->ops->scnprintf)
2019-03-06 22:40:15 +03:00
		return ins->ops->scnprintf(ins, bf, size, ops, max_ins_name);
2012-05-08 01:54:16 +04:00
2019-03-06 22:40:15 +03:00
	return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
2012-05-08 01:54:16 +04:00
}
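As an aside, computing max_ins_name is just one pass over the parsed
mnemonics. Here is a minimal standalone sketch of the idea the
'Calculate the max instruction name' changelog describes; the mnemonic
list is made up for illustration and this is not the in-tree code:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Made-up sample of mnemonics from one function's disassembly. */
	const char *mnemonics[] = { "mov", "vpxor", "vpcmpeqb", "vpmovmskb", "tzcnt" };
	size_t i, max_ins_name = 0;

	/* One scan over the lines, instead of hardcoding a width of 6. */
	for (i = 0; i < sizeof(mnemonics) / sizeof(mnemonics[0]); i++) {
		size_t len = strlen(mnemonics[i]);

		if (len > max_ins_name)
			max_ins_name = len;
	}

	printf("max_ins_name = %zu\n", max_ins_name);	/* 9, for vpmovmskb */
	return 0;
}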
perf annotate: Check for fused instructions
Macro fusion merges two instructions into a single micro-op. Intel Core
platforms perform this hardware optimization under limited
circumstances.
For example, CMP + JCC can be "fused" and executed/retired together.
With sampling, this can result in the sample sometimes landing on the
JCC and sometimes on the CMP, so the fused instruction pair should be
considered together.
On Nehalem, the fused instruction pairs are:
cmp/test + jcc.
On other newer CPUs:
cmp/test/add/sub/and/inc/dec + jcc.
This patch adds an x86-specific function which checks if two instructions
form a "fused" pair. For non-x86 arches, the function is just NULL.
Changelog:
v4: Move the CPU model checking to symbol__disassemble and save the CPU
family/model in arch structure.
It avoids checking every time a jump arrow is printed.
v3: Add checking for Nehalem (CMP, TEST). For other newer Intel CPUs
just check it by default (CMP, TEST, ADD, SUB, AND, INC, DEC).
v2: Remove the original weak function. Arnaldo points out that doing it
as a weak function that will be overridden by the host arch doesn't
work. So now it's implemented as an arch-specific function.
Committer fix:
Do not access evsel->evlist->env->cpuid, as ->env can be NULL; introduce
perf_evsel__env_cpuid(), just like perf_evsel__env_arch(), also used in
this function call.
The original patch was segfaulting 'perf top' + annotation.
But this essentially disables the fused instructions augmentation in
'perf top'; the right thing is to get the cpuid from the running kernel,
left for a later patch though.
Signed-off-by: Yao Jin <yao.jin@linux.intel.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1499403995-19857-2-git-send-email-yao.jin@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2017-07-07 08:06:34 +03:00
bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2)
{
	if (!arch || !arch->ins_is_fused)
		return false;

	return arch->ins_is_fused(arch, ins1, ins2);
}
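For illustration, an arch->ins_is_fused callback following the pairing
rules quoted in the changelog above could look like the sketch below.
The stand-in struct, the model-26 Nehalem cutoff, and the simplified
jcc test are assumptions, not the in-tree x86 implementation:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the family/model fields the changelog says are saved
 * in the arch structure; the real struct has many more members. */
struct arch_sketch {
	int family;
	int model;
};

static bool sketch_ins_is_fused(struct arch_sketch *arch, const char *ins1,
				const char *ins2)
{
	/* Fusion pairs the first instruction with a conditional jump;
	 * simplified here to "mnemonic starts with 'j'". */
	if (ins2[0] != 'j')
		return false;

	/* cmp/test + jcc fuse on Nehalem and everything newer. */
	if (!strcmp(ins1, "cmp") || !strcmp(ins1, "test"))
		return true;

	/* The wider set fuses only on cores newer than Nehalem;
	 * family 6, model 26 is assumed here as the Nehalem cutoff. */
	if (arch->family == 6 && arch->model > 26)
		return !strcmp(ins1, "add") || !strcmp(ins1, "sub") ||
		       !strcmp(ins1, "and") || !strcmp(ins1, "inc") ||
		       !strcmp(ins1, "dec");

	return false;
}

int main(void)
{
	struct arch_sketch cpu = { .family = 6, .model = 94 };

	printf("test+je fused: %d\n", sketch_ins_is_fused(&cpu, "test", "je"));
	printf("add+jne fused: %d\n", sketch_ins_is_fused(&cpu, "add", "jne"));
	return 0;
}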
2018-03-20 22:19:08 +03:00
static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
2012-04-18 23:07:38 +04:00
{
2012-04-20 22:26:47 +04:00
	char *endptr, *tok, *name;
2018-03-20 22:19:08 +03:00
	struct map *map = ms->map;
2018-03-02 17:59:36 +03:00
	struct addr_map_symbol target = {
2019-11-04 21:57:38 +03:00
		.ms = { .map = map, },
2018-03-02 17:59:36 +03:00
	};
2012-04-20 22:26:47 +04:00
2012-04-25 15:00:23 +04:00
	ops->target.addr = strtoull(ops->raw, &endptr, 16);
2012-04-20 22:26:47 +04:00
	name = strchr(endptr, '<');
	if (name == NULL)
		goto indirect_call;

	name++;
2016-11-16 21:50:38 +03:00
	if (arch->objdump.skip_functions_char &&
	    strchr(name, arch->objdump.skip_functions_char))
2015-12-07 02:07:13 +03:00
		return -1;
2012-04-20 22:26:47 +04:00
	tok = strchr(name, '>');
	if (tok == NULL)
		return -1;

	*tok = '\0';
2012-04-25 15:00:23 +04:00
	ops->target.name = strdup(name);
2012-04-20 22:26:47 +04:00
	*tok = '>';
2018-03-02 17:59:36 +03:00
	if (ops->target.name == NULL)
		return -1;
find_target:
	target.addr = map__objdump_2mem(map, ops->target.addr);
2012-04-20 22:26:47 +04:00
2019-11-26 04:15:35 +03:00
	if (maps__find_ams(ms->maps, &target) == 0 &&
2019-11-04 21:57:38 +03:00
	    map__rip_2objdump(target.ms.map, map->map_ip(target.ms.map, target.addr)) == ops->target.addr)
		ops->target.sym = target.ms.sym;
2012-05-11 19:28:55 +04:00
2012-04-18 23:07:38 +04:00
	return 0;
2018-03-02 17:59:36 +03:00
indirect_call:
	tok = strchr(endptr, '*');
perf annotate: Properly interpret indirect call
The patch changes the parsing of:
callq *0x8(%rbx)
from:
0.26 │ → callq *8
to:
0.26 │ → callq *0x8(%rbx)
in this case an address is followed by a register, thus one can't parse
only the address.
Committer testing:
1) run 'perf record sleep 10'
2) before applying the patch, run:
perf annotate --stdio2 > /tmp/before
3) after applying the patch, run:
perf annotate --stdio2 > /tmp/after
4) diff /tmp/before /tmp/after:
--- /tmp/before 2018-08-28 11:16:03.238384143 -0300
+++ /tmp/after 2018-08-28 11:15:39.335341042 -0300
@@ -13274,7 +13274,7 @@
↓ jle 128
hash_value = hash_table->hash_func (key);
mov 0x8(%rsp),%rdi
- 0.91 → callq *30
+ 0.91 → callq *0x30(%r12)
mov $0x2,%r8d
cmp $0x2,%eax
node_hash = hash_table->hashes[node_index];
@@ -13848,7 +13848,7 @@
mov %r14,%rdi
sub %rbx,%r13
mov %r13,%rdx
- → callq *38
+ → callq *0x38(%r15)
cmp %rax,%r13
1.91 ↓ je 240
1b4: mov $0xffffffff,%r13d
@@ -14026,7 +14026,7 @@
mov %rcx,-0x500(%rbp)
mov %r15,%rsi
mov %r14,%rdi
- → callq *38
+ → callq *0x38(%rax)
mov -0x500(%rbp),%rcx
cmp %rax,%rcx
↓ jne 9b0
<SNIP tons of other such cases>
Signed-off-by: Martin Liška <mliska@suse.cz>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Tested-by: Kim Phillips <kim.phillips@arm.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Link: http://lkml.kernel.org/r/bd1f3932-be2b-85f9-7582-111ee0a43b07@suse.cz
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-08-23 15:29:34 +03:00
	if (tok != NULL) {
		endptr++;

		/* Indirect call can use a non-rip register and offset: callq *0x8(%rbx).
		 * Do not parse such an instruction. */
		if (strstr(endptr, "(%r") == NULL)
			ops->target.addr = strtoull(endptr, NULL, 16);
	}
2018-03-02 17:59:36 +03:00
	goto find_target;
2012-04-18 23:07:38 +04:00
}
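To see what call__parse extracts, here is a self-contained sketch of the
same address/name split on raw call operands. The two sample operand
strings are made up and error handling is trimmed:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse_call_target(const char *raw)
{
	char *endptr;
	const char *name, *tok;
	unsigned long long addr = strtoull(raw, &endptr, 16);

	name = strchr(endptr, '<');
	if (name == NULL) {
		/* Indirect form: "*0x30(%r12)" stays unparsed (addr stays 0,
		 * so the raw text is printed later), while "*ffffffff8184dc80"
		 * yields an address. */
		tok = strchr(endptr, '*');
		if (tok != NULL && strstr(tok + 1, "(%r") == NULL)
			addr = strtoull(tok + 1, NULL, 16);
		printf("addr=%#llx, no name\n", addr);
		return;
	}

	name++;
	tok = strchr(name, '>');
	if (tok != NULL)
		printf("addr=%#llx, name=%.*s\n", addr, (int)(tok - name), name);
}

int main(void)
{
	parse_call_target("ffffffff8184dc80 <rb_insert_color>");
	parse_call_target("*0x30(%r12)");
	return 0;
}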
2012-04-20 22:26:47 +04:00
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
2019-03-06 22:40:15 +03:00
			   struct ins_operands *ops, int max_ins_name)
2012-04-20 22:26:47 +04:00
{
2018-03-02 17:59:36 +03:00
	if (ops->target.sym)
2019-03-06 22:40:15 +03:00
		return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
2012-04-20 22:26:47 +04:00
2012-05-11 19:28:55 +04:00
	if (ops->target.addr == 0)
2019-03-06 22:40:15 +03:00
		return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
2012-05-11 19:28:55 +04:00
perf annotate: Use ops->target.name when available for unresolved call targets
There is a bug where, when using 'perf annotate timerqueue_add', the
target of the only routine called with the 'callq' instruction,
'rb_insert_color', doesn't get resolved from its address when parsing
that 'callq' instruction.
That symbol resolution works when using 'perf report --tui' and then
annotating 'timerqueue_add' from there; the vmlinux dso->symbols
rb_tree somehow gets into a state where we can't find that address,
which is a bug that has to be investigated further.
But the objdump output has the function name in the raw disassembled
line, so we can fall back to it.
So, before:
# perf annotate timerqueue_add
│ mov %rbx,%rdi
│ mov %rbx,(%rdx)
│ → callq *ffffffff8184dc80
│ mov 0x8(%rbp),%rdx
│ test %rdx,%rdx
│ ↓ je 67
# perf report
│ mov %rbx,%rdi
│ mov %rbx,(%rdx)
│ → callq rb_insert_color
│ mov 0x8(%rbp),%rdx
│ test %rdx,%rdx
│ ↓ je 67
And after both look the same:
# perf annotate timerqueue_add
│ mov %rbx,%rdi
│ mov %rbx,(%rdx)
│ → callq rb_insert_color
│ mov 0x8(%rbp),%rdx
│ test %rdx,%rdx
│ ↓ je 67
From 'perf report' one can annotate and navigate to that 'rb_insert_color'
function, but not directly from 'perf annotate timerqueue_add'; that
remains to be investigated and fixed.
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: https://lkml.kernel.org/n/tip-nkktz6355rhqtq7o8atr8f8r@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-03-16 19:28:09 +03:00
	if (ops->target.name)
2019-03-06 22:40:15 +03:00
		return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.name);
2018-03-16 19:28:09 +03:00
2019-03-06 22:40:15 +03:00
	return scnprintf(bf, size, "%-*s *%" PRIx64, max_ins_name, ins->name, ops->target.addr);
2012-04-20 22:26:47 +04:00
}
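The "%-*s" in the returns above is what does the column alignment: the
mnemonic is left-justified into a field max_ins_name columns wide, so
the operand column starts at a fixed offset even for a 9-char name like
vpmovmskb. A minimal sketch with made-up data:

#include <stdio.h>

int main(void)
{
	int max_ins_name = 9;	/* e.g. strlen("vpmovmskb") */

	/* Same "%-*s" pattern as call__scnprintf: pad the mnemonic so
	 * every operand starts in the same column. */
	printf("%-*s %s\n", max_ins_name, "mov", "%edi,%eax");
	printf("%-*s %s\n", max_ins_name, "vpmovmskb", "%ymm0,%ecx");
	printf("%-*s %s\n", max_ins_name, "callq", "rb_insert_color");
	return 0;
}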
2012-04-18 23:07:38 +04:00
static struct ins_ops call_ops = {
2012-04-20 22:26:47 +04:00
	.parse	   = call__parse,
	.scnprintf = call__scnprintf,
2012-04-18 23:07:38 +04:00
};
bool ins__is_call(const struct ins *ins)
{
2018-03-07 16:43:25 +03:00
	return ins->ops == &call_ops || ins->ops == &s390_call_ops;
2012-04-18 23:07:38 +04:00
}
perf annotate: Fix parsing aarch64 branch instructions after objdump update
Starting with binutils 2.28, aarch64 objdump adds comments to the
disassembly output to show the alternative names of a condition code
[1].
It is assumed that commas in objdump comments could occur in other
arches now or in the future, so this fix is arch-independent.
The fix could have been done with arm64 specific jump__parse and
jump__scnprintf functions, but the jump__scnprintf instruction would
have to have its comment character be a literal, since the scnprintf
functions cannot receive a struct arch easily.
This inconvenience also applies to the generic jump__scnprintf, which is
why we add a raw_comment pointer to struct ins_operands, so the __parse
function assigns it to be re-used by its corresponding __scnprintf
function.
Example differences in 'perf annotate --stdio2' output on an aarch64
perf.data file:
BEFORE: → b.cs ffff200008133d1c <unwind_frame+0x18c> // b.hs, dffff7ecc47b
AFTER : ↓ b.cs 18c
BEFORE: → b.cc ffff200008d8d9cc <get_alloc_profile+0x31c> // b.lo, b.ul, dffff727295b
AFTER : ↓ b.cc 31c
The branch target labels 18c and 31c also now appear in the output:
BEFORE: add x26, x29, #0x80
AFTER : 18c: add x26, x29, #0x80
BEFORE: add x21, x21, #0x8
AFTER : 31c: add x21, x21, #0x8
The Fixes: tag below is added so stable branches will get the update; it
doesn't necessarily mean that commit was broken at the time, rather it
didn't withstand the aarch64 objdump update.
Tested no difference in output for sample x86_64, power arch perf.data files.
[1] https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;a=commit;h=bb7eff5206e4795ac79c177a80fe9f4630aaf730
Signed-off-by: Kim Phillips <kim.phillips@arm.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Taeung Song <treeze.taeung@gmail.com>
Cc: linux-arm-kernel@lists.infradead.org
Fixes: b13bbeee5ee6 ("perf annotate: Fix branch instruction with multiple operands")
Link: http://lkml.kernel.org/r/20180827125340.a2f7e291901d17cea05daba4@arm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-08-27 20:53:40 +03:00
/*
 * Prevents from matching commas in the comment section, e.g.:
 * ffff200008446e70:	b.cs	ffff2000084470f4 <generic_exec_single+0x314>	// b.hs, b.nlast
perf annotate: Fix jump parsing for C++ code.
Considering the following testcase:
int
foo(int a, int b)
{
for (unsigned i = 0; i < 1000000000; i++)
a += b;
return a;
}
int main()
{
foo (3, 4);
return 0;
}
'perf annotate' displays:
86.52 │40055e: → ja 40056c <foo(int, int)+0x26>
13.37 │400560: mov -0x18(%rbp),%eax
│400563: add %eax,-0x14(%rbp)
│400566: addl $0x1,-0x4(%rbp)
0.11 │40056a: → jmp 400557 <foo(int, int)+0x11>
│40056c: mov -0x14(%rbp),%eax
│40056f: pop %rbp
and the 'ja 40056c' does not link to the location in the function. That's
caused by the fact that the comma is wrongly parsed: it's part of the
function signature.
With my patch I see:
86.52 │ ┌──ja 26
13.37 │ │ mov -0x18(%rbp),%eax
│ │ add %eax,-0x14(%rbp)
│ │ addl $0x1,-0x4(%rbp)
0.11 │ │↑ jmp 11
│26:└─→mov -0x14(%rbp),%eax
and 'o' output prints:
86.52 │4005┌── ↓ ja 40056c <foo(int, int)+0x26>
13.37 │4005│0: mov -0x18(%rbp),%eax
│4005│3: add %eax,-0x14(%rbp)
│4005│6: addl $0x1,-0x4(%rbp)
0.11 │4005│a: ↑ jmp 400557 <foo(int, int)+0x11>
│4005└─→ mov -0x14(%rbp),%eax
On the contrary, compiling the very same file with gcc -x c, the parsing
is fine because function arguments are not displayed:
jmp 400543 <foo+0x1d>
Committer testing:
Before:
$ cat cpp_args_annotate.c
int
foo(int a, int b)
{
for (unsigned i = 0; i < 1000000000; i++)
a += b;
return a;
}
int main()
{
foo (3, 4);
return 0;
}
$ gcc --version |& head -1
gcc (GCC) 10.2.1 20201125 (Red Hat 10.2.1-9)
$ gcc -g cpp_args_annotate.c -o cpp_args_annotate
$ perf record ./cpp_args_annotate
[ perf record: Woken up 2 times to write data ]
[ perf record: Captured and wrote 0.275 MB perf.data (7188 samples) ]
$ perf annotate --stdio2 foo
Samples: 7K of event 'cycles:u', 4000 Hz, Event count (approx.): 7468429289, [percent: local period]
foo() /home/acme/c/cpp_args_annotate
Percent
0000000000401106 <foo>:
foo():
int
foo(int a, int b)
{
push %rbp
mov %rsp,%rbp
mov %edi,-0x14(%rbp)
mov %esi,-0x18(%rbp)
for (unsigned i = 0; i < 1000000000; i++)
movl $0x0,-0x4(%rbp)
↓ jmp 1d
a += b;
13.45 13: mov -0x18(%rbp),%eax
add %eax,-0x14(%rbp)
for (unsigned i = 0; i < 1000000000; i++)
addl $0x1,-0x4(%rbp)
0.09 1d: cmpl $0x3b9ac9ff,-0x4(%rbp)
86.46 ↑ jbe 13
return a;
mov -0x14(%rbp),%eax
}
pop %rbp
← retq
$
I.e. it works for C; now let's switch to C++:
$ g++ -g cpp_args_annotate.c -o cpp_args_annotate
$ perf record ./cpp_args_annotate
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.268 MB perf.data (6976 samples) ]
$ perf annotate --stdio2 foo
Samples: 6K of event 'cycles:u', 4000 Hz, Event count (approx.): 7380681761, [percent: local period]
foo() /home/acme/c/cpp_args_annotate
Percent
0000000000401106 <foo(int, int)>:
foo(int, int):
int
foo(int a, int b)
{
push %rbp
mov %rsp,%rbp
mov %edi,-0x14(%rbp)
mov %esi,-0x18(%rbp)
for (unsigned i = 0; i < 1000000000; i++)
movl $0x0,-0x4(%rbp)
cmpl $0x3b9ac9ff,-0x4(%rbp)
86.53 → ja 40112c <foo(int, int)+0x26>
a += b;
13.32 mov -0x18(%rbp),%eax
0.00 add %eax,-0x14(%rbp)
for (unsigned i = 0; i < 1000000000; i++)
addl $0x1,-0x4(%rbp)
0.15 → jmp 401117 <foo(int, int)+0x11>
return a;
mov -0x14(%rbp),%eax
}
pop %rbp
← retq
$
Reproduced.
Now with this patch:
Reusing the C++ built binary, as we can see here:
$ readelf -wi cpp_args_annotate | grep producer
<c> DW_AT_producer : (indirect string, offset: 0x2e): GNU C++14 10.2.1 20201125 (Red Hat 10.2.1-9) -mtune=generic -march=x86-64 -g
$
And furthermore:
$ file cpp_args_annotate
cpp_args_annotate: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, BuildID[sha1]=4fe3cab260204765605ec630d0dc7a7e93c361a9, for GNU/Linux 3.2.0, with debug_info, not stripped
$ perf buildid-list -i cpp_args_annotate
4fe3cab260204765605ec630d0dc7a7e93c361a9
$ perf buildid-list | grep cpp_args_annotate
4fe3cab260204765605ec630d0dc7a7e93c361a9 /home/acme/c/cpp_args_annotate
$
It now works:
$ perf annotate --stdio2 foo
Samples: 6K of event 'cycles:u', 4000 Hz, Event count (approx.): 7380681761, [percent: local period]
foo() /home/acme/c/cpp_args_annotate
Percent
0000000000401106 <foo(int, int)>:
foo(int, int):
int
foo(int a, int b)
{
push %rbp
mov %rsp,%rbp
mov %edi,-0x14(%rbp)
mov %esi,-0x18(%rbp)
for (unsigned i = 0; i < 1000000000; i++)
movl $0x0,-0x4(%rbp)
11: cmpl $0x3b9ac9ff,-0x4(%rbp)
86.53 ↓ ja 26
a += b;
13.32 mov -0x18(%rbp),%eax
0.00 add %eax,-0x14(%rbp)
for (unsigned i = 0; i < 1000000000; i++)
addl $0x1,-0x4(%rbp)
0.15 ↑ jmp 11
return a;
26: mov -0x14(%rbp),%eax
}
pop %rbp
← retq
$
Signed-off-by: Martin Liška <mliska@suse.cz>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Link: http://lore.kernel.org/lkml/13e1a405-edf9-e4c2-4327-a9b454353730@suse.cz
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2021-02-11 15:37:55 +03:00
 *
 * and skip comma as part of function arguments, e.g.:
 * 1d8b4ac <linemap_lookup(line_maps const*, unsigned int)+0xcc>
2018-08-27 20:53:40 +03:00
*/
static inline const char *validate_comma(const char *c, struct ins_operands *ops)
{
	if (ops->raw_comment && c > ops->raw_comment)
		return NULL;
2021-02-11 15:37:55 +03:00
	if (ops->raw_func_start && c > ops->raw_func_start)
		return NULL;
2018-08-27 20:53:40 +03:00
	return c;
}
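A self-contained sketch of the same guard, using a made-up line in the
style of the aarch64 objdump output quoted above; any comma at or beyond
the comment marker or the '<' that opens a function signature is
discarded:

#include <stdio.h>
#include <string.h>

/* Reject commas that live in the objdump comment or inside the
 * <function(signature)> part, keeping only real operand separators. */
static const char *check_comma(const char *c, const char *raw_comment,
			       const char *raw_func_start)
{
	if (raw_comment && c > raw_comment)
		return NULL;
	if (raw_func_start && c > raw_func_start)
		return NULL;
	return c;
}

int main(void)
{
	/* Made-up line: the only comma sits inside the "// b.hs" comment. */
	const char *raw = "ffff2000084470f4 <generic_exec_single+0x314> // b.hs, b.nlast";
	const char *comma = strchr(raw, ',');
	const char *comment = strstr(raw, "//");
	const char *func = strchr(raw, '<');

	printf("comma %s\n", check_comma(comma, comment, func) ? "kept" : "rejected");
	return 0;
}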
static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
2012-04-18 20:58:34 +04:00
{
2018-03-20 23:20:43 +03:00
	struct map *map = ms->map;
	struct symbol *sym = ms->sym;
	struct addr_map_symbol target = {
2019-11-04 21:57:38 +03:00
		.ms = { .map = map, },
2018-03-20 23:20:43 +03:00
	};
perf annotate: Support jump instruction with target as second operand
Architectures like PowerPC have jump instructions that include a target
address as a second operand. For example, 'bne cr7,0xc0000000000f6154'.
Add support for such instructions in perf annotate.
objdump o/p:
c0000000000f6140: ld r9,1032(r31)
c0000000000f6144: cmpdi cr7,r9,0
c0000000000f6148: bne cr7,0xc0000000000f6154
c0000000000f614c: ld r9,2312(r30)
c0000000000f6150: std r9,1032(r31)
c0000000000f6154: ld r9,88(r31)
Corresponding perf annotate o/p:
Before patch:
ld r9,1032(r31)
cmpdi cr7,r9,0
v bne 3ffffffffff09f2c
ld r9,2312(r30)
std r9,1032(r31)
74: ld r9,88(r31)
After patch:
ld r9,1032(r31)
cmpdi cr7,r9,0
v bne 74
ld r9,2312(r30)
std r9,1032(r31)
74: ld r9,88(r31)
Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Chris Riyder <chris.ryder@arm.com>
Cc: Kim Phillips <kim.phillips@arm.com>
Cc: Markus Trippelsdorf <markus@trippelsdorf.de>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Taeung Song <treeze.taeung@gmail.com>
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/1480953407-7605-2-git-send-email-ravi.bangoria@linux.vnet.ibm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-12-05 18:56:46 +03:00
	const char *c = strchr(ops->raw, ',');
2018-03-20 23:20:43 +03:00
	u64 start, end;
2018-08-27 20:53:40 +03:00
	ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char);
2021-02-11 15:37:55 +03:00
	ops->raw_func_start = strchr(ops->raw, '<');
2018-08-27 20:53:40 +03:00
	c = validate_comma(c, ops);
2018-03-20 23:20:43 +03:00
	/*
	 * Examples of lines to parse for the _cpp_lex_token@@Base
	 * function:
	 *
	 * 1159e6c: jne    115aa32 <_cpp_lex_token@@Base+0xf92>
	 * 1159e8b: jne    c469be <cpp_named_operator2name@@Base+0xa72>
	 *
	 * The first is a jump to an offset inside the same function,
	 * the second is to another function, i.e. that 0xa72 is an
	 * offset in the cpp_named_operator2name@@base function.
	 */
perf annotate: Fix branch instruction with multiple operands
'perf annotate' is dropping the cr* fields from branch instructions.
Fix it by adding support to display branch instructions having
multiple operands.
Power Arch objdump of int_sqrt:
20.36 | c0000000004d2694: subf r10,r10,r3
| c0000000004d2698: v bgt cr6,c0000000004d26a0 <int_sqrt+0x40>
1.82 | c0000000004d269c: mr r3,r10
29.18 | c0000000004d26a0: mr r10,r8
| c0000000004d26a4: v bgt cr7,c0000000004d26ac <int_sqrt+0x4c>
| c0000000004d26a8: mr r10,r7
Power Arch Before Patch:
20.36 | subf r10,r10,r3
| v bgt 40
1.82 | mr r3,r10
29.18 | 40: mr r10,r8
| v bgt 4c
| mr r10,r7
Power Arch After patch:
20.36 | subf r10,r10,r3
| v bgt cr6,40
1.82 | mr r3,r10
29.18 | 40: mr r10,r8
| v bgt cr7,4c
| mr r10,r7
Also support AArch64 conditional branch instructions, which can
have up to three operands:
Aarch64 Non-simplified (raw objdump) view:
│ffff0000083cd11c: ↑ cbz w0, ffff0000083cd100 <security_fil▒
...
4.44 │ffff000│083cd134: ↓ tbnz w0, #26, ffff0000083cd190 <securit▒
...
1.37 │ffff000│083cd144: ↓ tbnz w22, #5, ffff0000083cd1a4 <securit▒
│ffff000│083cd148: mov w19, #0x20000 //▒
1.02 │ffff000│083cd14c: ↓ tbz w22, #2, ffff0000083cd1ac <securit▒
...
0.68 │ffff000└──3cd16c: ↑ cbnz w0, ffff0000083cd120 <security_fil▒
Aarch64 Simplified, before this patch:
│ ↑ cbz 40
...
4.44 │ │↓ tbnz w0, #26, ffff0000083cd190 <security_file_permiss▒
...
1.37 │ │↓ tbnz w22, #5, ffff0000083cd1a4 <security_file_permiss▒
│ │ mov w19, #0x20000 // #131072
1.02 │ │↓ tbz w22, #2, ffff0000083cd1ac <security_file_permiss▒
...
0.68 │ └──cbnz 60
the cbz operand is missing, and the tbz doesn't get simplified processing
at all because the parsing function failed to match an address.
Aarch64 Simplified, After this patch applied:
│ ↑ cbz w0, 40
...
4.44 │ │↓ tbnz w0, #26, d0
...
1.37 │ │↓ tbnz w22, #5, e4
│ │ mov w19, #0x20000 // #131072
1.02 │ │↓ tbz w22, #2, ec
...
0.68 │ └──cbnz w0, 60
Originally-by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
Tested-by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
Reported-by: Anton Blanchard <anton@samba.org>
Reported-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Kim Phillips <kim.phillips@arm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Taeung Song <treeze.taeung@gmail.com>
Link: http://lkml.kernel.org/r/20170601092959.f60d98912e8a1b66fd1e4c0e@arm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2017-06-01 17:29:59 +03:00
	/*
	 * Skip over up to 2 possible operands to get to the address, e.g.:
	 * tbnz	 w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
	 */
	if (c++ != NULL) {
2016-12-05 18:56:46 +03:00
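Concretely, for the 'bne cr7,0xc0000000000f6154' example above: the first operand (cr7) is skipped at the comma, the second parses as the hex target, and subtracting the function's start address (0xc0000000000f60e0, inferred from the listing, not stated in the message) yields the 0x74 offset that shows up as the '74:' label after the patch.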
ops->target.addr = strtoull(c, NULL, 16);
2017-06-01 17:29:59 +03:00
if (!ops->target.addr) {
c = strchr(c, ',');
perf annotate: Fix parsing aarch64 branch instructions after objdump update
Starting with binutils 2.28, aarch64 objdump adds comments to the
disassembly output to show the alternative names of a condition code
[1].
It is assumed that commas in objdump comments could occur in other
arches now or in the future, so this fix is arch-independent.
The fix could have been done with arm64-specific jump__parse and
jump__scnprintf functions, but the jump__scnprintf function would have
had to have its comment character be a literal, since the scnprintf
functions cannot receive a struct arch easily.
This inconvenience also applies to the generic jump__scnprintf, which is
why we add a raw_comment pointer to struct ins_operands, so the __parse
function assigns it to be re-used by its corresponding __scnprintf
function.
Example differences in 'perf annotate --stdio2' output on an aarch64
perf.data file:
BEFORE: → b.cs ffff200008133d1c <unwind_frame+0x18c> // b.hs, dffff7ecc47b
AFTER : ↓ b.cs 18c
BEFORE: → b.cc ffff200008d8d9cc <get_alloc_profile+0x31c> // b.lo, b.ul, dffff727295b
AFTER : ↓ b.cc 31c
The branch target labels 18c and 31c also now appear in the output:
BEFORE: add x26, x29, #0x80
AFTER : 18c: add x26, x29, #0x80
BEFORE: add x21, x21, #0x8
AFTER : 31c: add x21, x21, #0x8
The Fixes: tag below is added so stable branches will get the update; it
doesn't necessarily mean that commit was broken at the time; rather, it
didn't withstand the aarch64 objdump update.
Tested: no difference in output for sample x86_64 and power arch
perf.data files.
[1] https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;a=commit;h=bb7eff5206e4795ac79c177a80fe9f4630aaf730
Signed-off-by: Kim Phillips <kim.phillips@arm.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Taeung Song <treeze.taeung@gmail.com>
Cc: linux-arm-kernel@lists.infradead.org
Fixes: b13bbeee5ee6 ("perf annotate: Fix branch instruction with multiple operands")
Link: http://lkml.kernel.org/r/20180827125340.a2f7e291901d17cea05daba4@arm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-08-27 20:53:40 +03:00
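A small self-contained illustration of the comma validation this commit introduces (assuming, per the message, that the parse step recorded where the objdump comment begins in raw_comment): a comma found at or past the comment start belongs to the comment, not to the operand list.

#include <stdio.h>
#include <string.h>

/* Sketch of the validate_comma() idea: reject commas that sit inside
 * the trailing objdump comment, e.g. "// b.hs, dffff7ecc47b". */
static const char *check_comma(const char *c, const char *raw_comment)
{
	if (raw_comment && c > raw_comment)
		return NULL;
	return c;
}

int main(void)
{
	const char *raw = "ffff200008133d1c <unwind_frame+0x18c>  // b.hs, dffff7ecc47b";
	const char *comment = strstr(raw, "//");
	const char *c = strchr(raw, ',');

	/* the only comma here lives in the comment, so it is rejected */
	printf("%s\n", check_comma(c, comment) ? "operand comma" : "comment comma");
	return 0;
}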
c = validate_comma(c, ops);
2017-06-01 17:29:59 +03:00
if (c++ != NULL)
ops->target.addr = strtoull(c, NULL, 16);
}
} else {
2016-12-05 18:56:46 +03:00
ops->target.addr = strtoull(ops->raw, NULL, 16);
2017-06-01 17:29:59 +03:00
}
2012-04-25 21:16:03 +04:00
2018-03-20 23:20:43 +03:00
target.addr = map__objdump_2mem(map, ops->target.addr);
start = map->unmap_ip(map, sym->start),
end = map->unmap_ip(map, sym->end);
ops->target.outside = target.addr < start || target.addr > end;
/*
 * FIXME: things like this in _cpp_lex_token (gcc's cc1 program):
 * cpp_named_operator2name@@Base+0xa72
 * Point to a place that is after the cpp_named_operator2name
 * boundaries, i.e. in the ELF symbol table for cc1
 * cpp_named_operator2name is marked as being 32 bytes long, but it in
 * fact is much larger than that, so we seem to need a symbols__find()
 * routine that looks for >= current->start and < next_symbol->start,
 * possibly just for C++ objects?
 *
 * For now let's just make some progress by marking jumps to outside the
 * current function as call-like.
 *
 * Actual navigation will come next, with further understanding of how
 * the symbol searching and disassembly should be done.
perf annotate: Support jumping from one function to another
For instance:
entry_SYSCALL_64 /lib/modules/4.16.0-rc5-00086-gdf09348f78dc/build/vmlinux
5.50 │ → callq do_syscall_64
14.56 │ mov 0x58(%rsp),%rcx
7.44 │ mov 0x80(%rsp),%r11
0.32 │ cmp %rcx,%r11
│ → jne swapgs_restore_regs_and_return_to_usermode
0.32 │ shl $0x10,%rcx
0.32 │ sar $0x10,%rcx
3.24 │ cmp %rcx,%r11
│ → jne swapgs_restore_regs_and_return_to_usermode
2.27 │ cmpq $0x33,0x88(%rsp)
1.29 │ → jne swapgs_restore_regs_and_return_to_usermode
│ mov 0x30(%rsp),%r11
8.74 │ cmp %r11,0x90(%rsp)
│ → jne swapgs_restore_regs_and_return_to_usermode
0.32 │ test $0x10100,%r11
│ → jne swapgs_restore_regs_and_return_to_usermode
0.32 │ cmpq $0x2b,0xa0(%rsp)
0.65 │ → jne swapgs_restore_regs_and_return_to_usermode
It'll behave just like a "call" instruction, i.e. press enter or the
right arrow over one such line and the browser will navigate to the
annotated disassembly of that function; when exited, via the left arrow
or esc, it will come back to the calling function.
Now to support jumping to an offset in a different function...
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: https://lkml.kernel.org/n/tip-78o508mqvr8inhj63ddtw7mo@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-03-23 16:50:35 +03:00
*/
2019-11-26 04:15:35 +03:00
if (maps__find_ams(ms->maps, &target) == 0 &&
2019-11-04 21:57:38 +03:00
map__rip_2objdump(target.ms.map, map->map_ip(target.ms.map, target.addr)) == ops->target.addr)
ops->target.sym = target.ms.sym;
2018-03-20 23:20:43 +03:00
perf annotate: Use absolute addresses to calculate jump target offsets
These types of jumps were confusing the annotate browser:
entry_SYSCALL_64 /lib/modules/4.16.0-rc5-00086-gdf09348f78dc/build/vmlinux
Percent│ffffffff81a00020: swapgs
<SNIP>
│ffffffff81a00128: ↓ jae ffffffff81a00139 <syscall_return_via_sysret+0x53>
<SNIP>
│ffffffff81a00155: → jmpq *0x825d2d(%rip) # ffffffff82225e88 <pv_cpu_ops+0xe8>
I.e. the syscall_return_via_sysret function is actually "inside" the
entry_SYSCALL_64 function, and the offsets in jumps like these (+0x53)
are relative to syscall_return_via_sysret, not to entry_SYSCALL_64.
Or this may be some artifact in how the assembler marks the start and
end of a function and how this ends up in the ELF symtab for vmlinux,
i.e. syscall_return_via_sysret() isn't "inside" entry_SYSCALL_64, but
just right after it.
From readelf -sw vmlinux:
80267: ffffffff81a00020 315 NOTYPE GLOBAL DEFAULT 1 entry_SYSCALL_64
316: ffffffff81a000e6 0 NOTYPE LOCAL DEFAULT 1 syscall_return_via_sysret
0xffffffff81a00020 + 315 > 0xffffffff81a000e6
So instead of looking for offsets after that last '+' sign, calculate
offsets for jump target addresses that are inside the function being
disassembled from the absolute address, 0xffffffff81a00139 in this case,
subtracting from it the objdump address for the start of the function
being disassembled, entry_SYSCALL_64() in this case.
So, before this patch:
entry_SYSCALL_64 /lib/modules/4.16.0-rc5-00086-gdf09348f78dc/build/vmlinux
Percent│ pop %r10
│ pop %r9
│ pop %r8
│ pop %rax
│ pop %rsi
│ pop %rdx
│ pop %rsi
│ mov %rsp,%rdi
│ mov %gs:0x5004,%rsp
│ pushq 0x28(%rdi)
│ pushq (%rdi)
│ push %rax
│ ↑ jmp 6c
│ mov %cr3,%rdi
│ ↑ jmp 62
│ mov %rdi,%rax
│ and $0x7ff,%rdi
│ bt %rdi,%gs:0x2219a
│ ↑ jae 53
│ btr %rdi,%gs:0x2219a
│ mov %rax,%rdi
│ ↑ jmp 5b
After:
entry_SYSCALL_64 /lib/modules/4.16.0-rc5-00086-gdf09348f78dc/build/vmlinux
0.65 │ → jne swapgs_restore_regs_and_return_to_usermode
│ pop %r10
│ pop %r9
│ pop %r8
│ pop %rax
│ pop %rsi
│ pop %rdx
│ pop %rsi
│ mov %rsp,%rdi
│ mov %gs:0x5004,%rsp
│ pushq 0x28(%rdi)
│ pushq (%rdi)
│ push %rax
│ ↓ jmp 132
│ mov %cr3,%rdi
│ ┌──jmp 128
│ │ mov %rdi,%rax
│ │ and $0x7ff,%rdi
│ │ bt %rdi,%gs:0x2219a
│ │↓ jae 119
│ │ btr %rdi,%gs:0x2219a
│ │ mov %rax,%rdi
│ │↓ jmp 121
│119:│ mov %rax,%rdi
│ │ bts $0x3f,%rdi
│121:│ or $0x800,%rdi
│128:└─→or $0x1000,%rdi
│ mov %rdi,%cr3
│132: pop %rax
│ pop %rdi
│ pop %rsp
│ → jmpq *0x825d2d(%rip) # ffffffff82225e88 <pv_cpu_ops+0xe8>
With those at least navigating to the right destination, an improvement
for these cases seems to be to somehow mark those inner functions, which
in this case could be:
entry_SYSCALL_64 /lib/modules/4.16.0-rc5-00086-gdf09348f78dc/build/vmlinux
│syscall_return_via_sysret:
│ pop %r15
│ pop %r14
│ pop %r13
│ pop %r12
│ pop %rbp
│ pop %rbx
│ pop %rsi
│ pop %r10
│ pop %r9
│ pop %r8
│ pop %rax
│ pop %rsi
│ pop %rdx
│ pop %rsi
│ mov %rsp,%rdi
│ mov %gs:0x5004,%rsp
│ pushq 0x28(%rdi)
│ pushq (%rdi)
│ push %rax
│ ↓ jmp 132
│ mov %cr3,%rdi
│ ┌──jmp 128
│ │ mov %rdi,%rax
│ │ and $0x7ff,%rdi
│ │ bt %rdi,%gs:0x2219a
│ │↓ jae 119
│ │ btr %rdi,%gs:0x2219a
│ │ mov %rax,%rdi
│ │↓ jmp 121
│119:│ mov %rax,%rdi
│ │ bts $0x3f,%rdi
│121:│ or $0x800,%rdi
│128:└─→or $0x1000,%rdi
│ mov %rdi,%cr3
│132: pop %rax
│ pop %rdi
│ pop %rsp
│ → jmpq *0x825d2d(%rip) # ffffffff82225e88 <pv_cpu_ops+0xe8>
This all gets much better viewed if one uses 'perf report --ignore-vmlinux'
forcing the usage of /proc/kcore + /proc/kallsyms, when the above
actually gets down to:
# perf report --ignore-vmlinux
## do '/64', will show the function names containing '64',
## navigate to /entry_SYSCALL_64_after_hwframe.annotation,
## press 'A' to annotate, then 'P' to print that annotation
## to a file
## From another xterm (or see on screen, this 'P' thing is for
## getting rid of those right side scroll bars/spaces):
# cat /entry_SYSCALL_64_after_hwframe.annotation
entry_SYSCALL_64_after_hwframe() /proc/kcore
Event: cycles:ppp
Percent
Disassembly of section load0:
ffffffff9aa00044 <load0>:
11.97 push %rax
4.85 push %rdi
push %rsi
2.59 push %rdx
2.27 push %rcx
0.32 pushq $0xffffffffffffffda
1.29 push %r8
xor %r8d,%r8d
1.62 push %r9
0.65 xor %r9d,%r9d
1.62 push %r10
xor %r10d,%r10d
5.50 push %r11
xor %r11d,%r11d
3.56 push %rbx
xor %ebx,%ebx
4.21 push %rbp
xor %ebp,%ebp
2.59 push %r12
0.97 xor %r12d,%r12d
3.24 push %r13
xor %r13d,%r13d
2.27 push %r14
xor %r14d,%r14d
4.21 push %r15
xor %r15d,%r15d
0.97 mov %rsp,%rdi
5.50 → callq do_syscall_64
14.56 mov 0x58(%rsp),%rcx
7.44 mov 0x80(%rsp),%r11
0.32 cmp %rcx,%r11
→ jne swapgs_restore_regs_and_return_to_usermode
0.32 shl $0x10,%rcx
0.32 sar $0x10,%rcx
3.24 cmp %rcx,%r11
→ jne swapgs_restore_regs_and_return_to_usermode
2.27 cmpq $0x33,0x88(%rsp)
1.29 → jne swapgs_restore_regs_and_return_to_usermode
mov 0x30(%rsp),%r11
8.74 cmp %r11,0x90(%rsp)
→ jne swapgs_restore_regs_and_return_to_usermode
0.32 test $0x10100,%r11
→ jne swapgs_restore_regs_and_return_to_usermode
0.32 cmpq $0x2b,0xa0(%rsp)
0.65 → jne swapgs_restore_regs_and_return_to_usermode
I.e. using kallsyms makes the function start/end be determined
differently than using what is in the vmlinux ELF symtab, and the hits
actually go to entry_SYSCALL_64_after_hwframe, which is a GLOBAL() after
the start of entry_SYSCALL_64:
ENTRY(entry_SYSCALL_64)
UNWIND_HINT_EMPTY
<SNIP>
pushq $__USER_CS /* pt_regs->cs */
pushq %rcx /* pt_regs->ip */
GLOBAL(entry_SYSCALL_64_after_hwframe)
pushq %rax /* pt_regs->orig_ax */
PUSH_AND_CLEAR_REGS rax=$-ENOSYS
And it goes and ends at:
cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */
jne swapgs_restore_regs_and_return_to_usermode
/*
* We win! This label is here just for ease of understanding
* perf profiles. Nothing jumps here.
*/
syscall_return_via_sysret:
/* rcx and r11 are already restored (see code above) */
UNWIND_HINT_EMPTY
POP_REGS pop_rdi=0 skip_r11rcx=1
So perhaps some people should really just play with '--ignore-vmlinux'
to force /proc/kcore + kallsyms.
One idea is to do both, i.e. have a vmlinux annotation and a
kcore+kallsyms one, when possible, and even show the patched location,
etc.
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: https://lkml.kernel.org/n/tip-r11knxv8voesav31xokjiuo6@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-03-23 18:26:39 +03:00
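Spelling out the arithmetic with the addresses above: target.addr - start = 0xffffffff81a00139 - 0xffffffff81a00020 = 0x119, which is exactly the '119:' label that now appears in the simplified listing.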
if (!ops->target.outside) {
ops->target.offset = target.addr - start;
perf annotate: Fix jump target outside of function address range
If the jump target is outside of the function range, perf is not
handling it correctly. Especially when the target address is less than
the function start address, the target offset will be negative. But the
target address is declared to be unsigned, so the negative number is
converted into its 2's complement. See the example below: here the
target of the 'jmpq' instruction at 34cf8 is 34ac0, which is less than
the function start address (34cf0).
34ac0 - 34cf0 = -0x230 = 0xfffffffffffffdd0
Objdump output:
0000000000034cf0 <__sigaction>:
__GI___sigaction():
34cf0: lea -0x20(%rdi),%eax
34cf3: cmp $0x1,%eax
34cf6: jbe 34d00 <__sigaction+0x10>
34cf8: jmpq 34ac0 <__GI___libc_sigaction>
34cfd: nopl (%rax)
34d00: mov 0x386161(%rip),%rax # 3bae68 <_DYNAMIC+0x2e8>
34d07: movl $0x16,%fs:(%rax)
34d0e: mov $0xffffffff,%eax
34d13: retq
perf annotate before applying patch:
__GI___sigaction /usr/lib64/libc-2.22.so
lea -0x20(%rdi),%eax
cmp $0x1,%eax
v jbe 10
v jmpq fffffffffffffdd0
nop
10: mov _DYNAMIC+0x2e8,%rax
movl $0x16,%fs:(%rax)
mov $0xffffffff,%eax
retq
perf annotate after applying patch:
__GI___sigaction /usr/lib64/libc-2.22.so
lea -0x20(%rdi),%eax
cmp $0x1,%eax
v jbe 10
^ jmpq 34ac0 <__GI___libc_sigaction>
nop
10: mov _DYNAMIC+0x2e8,%rax
movl $0x16,%fs:(%rax)
mov $0xffffffff,%eax
retq
Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Chris Riyder <chris.ryder@arm.com>
Cc: Kim Phillips <kim.phillips@arm.com>
Cc: Markus Trippelsdorf <markus@trippelsdorf.de>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Taeung Song <treeze.taeung@gmail.com>
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/1480953407-7605-3-git-send-email-ravi.bangoria@linux.vnet.ibm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-12-05 18:56:47 +03:00
ops->target.offset_avail = true;
} else {
ops->target.offset_avail = false;
}
2012-04-18 20:58:34 +04:00
return 0;
}
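The wraparound described in that commit is easy to reproduce in isolation; a minimal demo using the message's own numbers (34ac0 and 34cf0):

#include <inttypes.h>
#include <stdio.h>

/* Demo of the failure mode fixed above: the target 0x34ac0 lies before
 * the function start 0x34cf0, so an unsigned subtraction wraps to the
 * 2's complement value instead of staying negative. */
int main(void)
{
	uint64_t target = 0x34ac0, start = 0x34cf0;

	printf("unsigned: %" PRIx64 "\n", target - start); /* fffffffffffffdd0 */
	printf("signed:   %" PRId64 "\n", (int64_t)(target - start)); /* -560, i.e. -0x230 */
	return 0;
}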
2012-04-20 21:38:46 +04:00
static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
perf annotate: Calculate the max instruction name, align column to that
We were hardcoding '6' as the max instruction name width, and we have
lots of instructions that are longer than that; see the diff between two
'P'-printed TUI annotations for a libc function that uses instructions
with long names, such as 'vpmovmskb' with its 9 chars:
--- __strcmp_avx2.annotation.before 2019-03-06 16:31:39.368020425 -0300
+++ __strcmp_avx2.annotation 2019-03-06 16:32:12.079450508 -0300
@@ -2,284 +2,284 @@
Event: cycles:ppp
Percent endbr64
- 0.10 mov %edi,%eax
+ 0.10 mov %edi,%eax
- xor %edx,%edx
+ xor %edx,%edx
- 3.54 vpxor %ymm7,%ymm7,%ymm7
+ 3.54 vpxor %ymm7,%ymm7,%ymm7
- or %esi,%eax
+ or %esi,%eax
- and $0xfff,%eax
+ and $0xfff,%eax
- cmp $0xf80,%eax
+ cmp $0xf80,%eax
- ↓ jg 370
+ ↓ jg 370
- 27.07 vmovdqu (%rdi),%ymm1
+ 27.07 vmovdqu (%rdi),%ymm1
- 7.97 vpcmpeqb (%rsi),%ymm1,%ymm0
+ 7.97 vpcmpeqb (%rsi),%ymm1,%ymm0
- 2.15 vpminub %ymm1,%ymm0,%ymm0
+ 2.15 vpminub %ymm1,%ymm0,%ymm0
- 4.09 vpcmpeqb %ymm7,%ymm0,%ymm0
+ 4.09 vpcmpeqb %ymm7,%ymm0,%ymm0
- 0.43 vpmovmskb %ymm0,%ecx
+ 0.43 vpmovmskb %ymm0,%ecx
- 1.53 test %ecx,%ecx
+ 1.53 test %ecx,%ecx
- ↓ je b0
+ ↓ je b0
- 5.26 tzcnt %ecx,%edx
+ 5.26 tzcnt %ecx,%edx
- 18.40 movzbl (%rdi,%rdx,1),%eax
+ 18.40 movzbl (%rdi,%rdx,1),%eax
- 7.09 movzbl (%rsi,%rdx,1),%edx
+ 7.09 movzbl (%rsi,%rdx,1),%edx
- 3.34 sub %edx,%eax
+ 3.34 sub %edx,%eax
2.37 vzeroupper
← retq
nop
- 50: tzcnt %ecx,%edx
+ 50: tzcnt %ecx,%edx
- movzbl 0x20(%rdi,%rdx,1),%eax
+ movzbl 0x20(%rdi,%rdx,1),%eax
- movzbl 0x20(%rsi,%rdx,1),%edx
+ movzbl 0x20(%rsi,%rdx,1),%edx
- sub %edx,%eax
+ sub %edx,%eax
vzeroupper
← retq
- data16 nopw %cs:0x0(%rax,%rax,1)
+ data16 nopw %cs:0x0(%rax,%rax,1)
Reported-by: Travis Downs <travis.downs@gmail.com>
LPU-Reference: CAOBGo4z1KfmWeOm6Et0cnX5Z6DWsG2PQbAvRn1MhVPJmXHrc5g@mail.gmail.com
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lkml.kernel.org/n/tip-89wsdd9h9g6bvq52sgp6d0u4@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-03-06 22:40:15 +03:00
struct ins_operands *ops, int max_ins_name)
2012-04-19 17:16:27 +04:00
{
2018-03-23 16:57:08 +03:00
const char *c;
2017-06-01 17:29:59 +03:00
2016-12-05 18:56:47 +03:00
if (!ops->target.addr || ops->target.offset < 0)
2019-03-06 22:40:15 +03:00
return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
perf annotate: Show raw form for jump instruction with indirect target
For jump instructions that do not include the target address as a direct
operand, show the original disassembled line for them. This is needed
for certain powerpc jump instructions that take the target address from
a register (such as bctr, btar, ...).
Before:
ld r12,32088(r12)
mtctr r12
v bctr ffffffffffffca2c
std r2,24(r1)
addis r12,r2,-1
After:
ld r12,32088(r12)
mtctr r12
v bctr
std r2,24(r1)
addis r12,r2,-1
Committer notes:
Testing it using a perf.data file and vmlinux for powerpc64,
cross-annotating it on a x86_64 workstation:
Before:
.__bpf_prog_run vmlinux.powerpc
│ std r10,512(r9) ▒
│ lbz r9,0(r31) ▒
│ rldicr r9,r9,3,60 ▒
│ ldx r9,r30,r9 ▒
│ mtctr r9 ▒
100.00 │ ↓ bctr 3fffffffffe01510 ▒
│ lwa r10,4(r31) ▒
│ lwz r9,0(r31) ▒
<SNIP>
Invalid jump offset: 3fffffffffe01510
After:
.__bpf_prog_run vmlinux.powerpc
│ std r10,512(r9) ▒
│ lbz r9,0(r31) ▒
│ rldicr r9,r9,3,60 ▒
│ ldx r9,r30,r9 ▒
│ mtctr r9 ▒
100.00 │ ↓ bctr ▒
│ lwa r10,4(r31) ▒
│ lwz r9,0(r31) ▒
<SNIP>
Invalid jump offset: 3fffffffffe01510
This, in turn, uncovers another problem with jumps without operands: the
ENTER/→ operation to jump to the target still continues using the bogus
target :-)
BTW, this was the file used for the above tests:
[acme@jouet ravi_bangoria]$ perf report --header-only -i perf.data.f22vm.powerdev
# ========
# captured on: Thu Nov 24 12:40:38 2016
# hostname : pdev-f22-qemu
# os release : 4.4.10-200.fc22.ppc64
# perf version : 4.9.rc1.g6298ce
# arch : ppc64
# nrcpus online : 48
# nrcpus avail : 48
# cpudesc : POWER7 (architected), altivec supported
# cpuid : 74,513
# total memory : 4158976 kB
# cmdline : /home/ravi/Workspace/linux/tools/perf/perf record -a
# event : name = cycles:ppp, , size = 112, { sample_period, sample_freq } = 4000, sample_type = IP|TID|TIME|CPU|PERIOD, disabled = 1, inherit = 1, mmap = 1, c
# HEADER_CPU_TOPOLOGY info available, use -I to display
# HEADER_NUMA_TOPOLOGY info available, use -I to display
# pmu mappings: cpu = 4, software = 1, tracepoint = 2, breakpoint = 5
# missing features: HEADER_TRACING_DATA HEADER_BRANCH_STACK HEADER_GROUP_DESC HEADER_AUXTRACE HEADER_STAT HEADER_CACHE
# ========
#
[acme@jouet ravi_bangoria]$
Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Chris Riyder <chris.ryder@arm.com>
Cc: Kim Phillips <kim.phillips@arm.com>
Cc: Markus Trippelsdorf <markus@trippelsdorf.de>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Taeung Song <treeze.taeung@gmail.com>
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/1480953407-7605-1-git-send-email-ravi.bangoria@linux.vnet.ibm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-12-05 18:56:45 +03:00
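A standalone sketch (not the perf code) of the fallback rule this commit adds: if the operand text yields no hex target, as with 'bctr' whose target sits in the ctr register, keep the raw disassembled form instead of printing a bogus offset.

#include <stdio.h>
#include <stdlib.h>

static void print_jump(const char *name, const char *ops_raw)
{
	unsigned long long addr = strtoull(ops_raw, NULL, 16);

	if (!addr) {
		/* raw form: mnemonic plus whatever objdump emitted */
		printf("%s %s\n", name, ops_raw);
		return;
	}
	printf("%s %llx\n", name, addr);	/* simplified target form */
}

int main(void)
{
	print_jump("bctr", "");			/* no target: raw form */
	print_jump("b", "c0000000000f6154");	/* direct hex target */
	return 0;
}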
2018-03-23 16:50:35 +03:00
if (ops->target.outside && ops->target.sym != NULL)
2019-03-06 22:40:15 +03:00
return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
2018-03-23 16:50:35 +03:00
2018-03-23 16:57:08 +03:00
c = strchr(ops->raw, ',');
2018-08-27 20:53:40 +03:00
c = validate_comma(c, ops);
2017-06-01 17:29:59 +03:00
if (c != NULL) {
const char *c2 = strchr(c + 1, ',');
2018-08-27 20:53:40 +03:00
c2 = validate_comma(c2, ops);
2017-06-01 17:29:59 +03:00
/* check for 3-op insn */
if (c2 != NULL)
c = c2;
c++;
/* mirror arch objdump's space-after-comma style */
if (*c == ' ')
c++;
}
2019-03-06 22:40:15 +03:00
return scnprintf(bf, size, "%-*s %.*s%" PRIx64, max_ins_name,
2017-06-01 17:29:59 +03:00
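A minimal standalone sketch of the multi-operand idea from the commit above: keep the raw operand list up to the last comma (so a "cr6," condition register survives), and rewrite only the final address operand as a short offset. This is an illustration under simplified assumptions, not perf's actual jump__parse()/jump__scnprintf():

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

static void simplify_branch(const char *name, const char *raw, uint64_t offset,
			    char *bf, size_t size)
{
	const char *c = strchr(raw, ',');

	/* Keep everything up to and including the last comma, if any
	 * (AArch64 tbz/tbnz can have two commas). */
	if (c) {
		const char *c2 = strchr(c + 1, ',');
		if (c2)
			c = c2;
		snprintf(bf, size, "%s %.*s%" PRIx64, name,
			 (int)(c - raw + 1), raw, offset);
	} else {
		snprintf(bf, size, "%s %" PRIx64, name, offset);
	}
}

int main(void)
{
	char bf[64];

	simplify_branch("bgt", "cr6,c0000000004d26a0", 0x40, bf, sizeof(bf));
	printf("%s\n", bf);	/* prints: bgt cr6,40 */
	return 0;
}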
			 ins->name, c ? c - ops->raw : 0, ops->raw,
			 ops->target.offset);
2012-04-19 17:16:27 +04:00
}
2012-04-18 20:58:34 +04:00
static struct ins_ops jump_ops = {
2012-04-20 21:38:46 +04:00
	.parse	   = jump__parse,
	.scnprintf = jump__scnprintf,
2012-04-18 20:58:34 +04:00
};

bool ins__is_jump(const struct ins *ins)
{
	return ins->ops == &jump_ops;
}
2012-05-11 23:48:49 +04:00
static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
{
	char *endptr, *name, *t;

	if (strstr(raw, "(%rip)") == NULL)
		return 0;

	*addrp = strtoull(comment, &endptr, 16);
2017-11-28 10:56:32 +03:00
	if (endptr == comment)
		return 0;
2012-05-11 23:48:49 +04:00
	name = strchr(endptr, '<');
	if (name == NULL)
		return -1;

	name++;

	t = strchr(name, '>');
	if (t == NULL)
		return 0;

	*t = '\0';
	*namep = strdup(name);
	*t = '>';

	return 0;
}
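For illustration, here is what comment__symbol() recovers from a RIP-relative operand plus objdump's trailing comment, re-implemented standalone; the input strings are made up for the demo:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char raw[] = "0x0(%rip)";
	char comment[] = "223c310 <system_zone>";
	char *endptr, *name, *t;
	unsigned long long addr;

	/* Only RIP-relative operands carry a resolved target. */
	if (strstr(raw, "(%rip)") == NULL)
		return 1;

	/* The comment starts with the resolved hex address... */
	addr = strtoull(comment, &endptr, 16);

	/* ...followed by the symbol name between '<' and '>'. */
	name = strchr(endptr, '<');
	if (name == NULL)
		return 1;
	name++;
	t = strchr(name, '>');
	if (t)
		*t = '\0';

	printf("addr=%#llx name=%s\n", addr, name); /* addr=0x223c310 name=system_zone */
	return 0;
}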
2018-03-20 22:19:08 +03:00
static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
2012-05-12 20:15:34 +04:00
{
	ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
	if (ops->locked.ops == NULL)
		return 0;
2016-11-24 17:16:06 +03:00
	if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0)
2012-05-12 20:15:34 +04:00
		goto out_free_ops;
2016-11-24 17:16:06 +03:00
	ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name);
2015-01-18 22:00:21 +03:00
2016-11-24 17:16:06 +03:00
	if (ops->locked.ins.ops == NULL)
2012-11-09 21:27:13 +04:00
		goto out_free_ops;
2012-05-12 20:15:34 +04:00
2016-11-24 17:16:06 +03:00
	if (ops->locked.ins.ops->parse &&
2018-03-20 22:19:08 +03:00
	    ops->locked.ins.ops->parse(arch, ops->locked.ops, ms) < 0)
2015-01-18 22:00:20 +03:00
		goto out_free_ops;
2012-05-12 20:15:34 +04:00
	return 0;

out_free_ops:
2013-12-27 00:41:15 +04:00
	zfree(&ops->locked.ops);
2012-05-12 20:15:34 +04:00
	return 0;
}

static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
2019-03-06 22:40:15 +03:00
			   struct ins_operands *ops, int max_ins_name)
2012-05-12 20:15:34 +04:00
{
	int printed;
2016-11-24 17:16:06 +03:00
	if (ops->locked.ins.ops == NULL)
2019-03-06 22:40:15 +03:00
		return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
2012-05-12 20:15:34 +04:00
2019-03-06 22:40:15 +03:00
	printed = scnprintf(bf, size, "%-*s ", max_ins_name, ins->name);
2016-11-24 17:16:06 +03:00
	return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
2019-03-06 22:40:15 +03:00
					size - printed, ops->locked.ops, max_ins_name);
2012-05-12 20:15:34 +04:00
}
2012-05-12 20:26:20 +04:00
static void lock__delete(struct ins_operands *ops)
{
2016-11-24 17:16:06 +03:00
	struct ins *ins = &ops->locked.ins;
2015-01-18 22:00:21 +03:00
2016-11-24 17:16:06 +03:00
	if (ins->ops && ins->ops->free)
2015-01-18 22:00:21 +03:00
		ins->ops->free(ops->locked.ops);
	else
		ins__delete(ops->locked.ops);
2013-12-27 23:55:14 +04:00
	zfree(&ops->locked.ops);
	zfree(&ops->target.raw);
	zfree(&ops->target.name);
2012-05-12 20:26:20 +04:00
}
2012-05-12 20:15:34 +04:00
static struct ins_ops lock_ops = {
2012-05-12 20:26:20 +04:00
	.free	   = lock__delete,
2012-05-12 20:15:34 +04:00
	.parse	   = lock__parse,
	.scnprintf = lock__scnprintf,
};
2018-03-20 22:19:08 +03:00
static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
2012-05-11 23:48:49 +04:00
{
	char *s = strchr(ops->raw, ','), *target, *comment, prev;

	if (s == NULL)
		return -1;

	*s = '\0';
	ops->source.raw = strdup(ops->raw);
	*s = ',';
2014-12-17 23:24:45 +03:00
2012-05-11 23:48:49 +04:00
	if (ops->source.raw == NULL)
		return -1;

	target = ++s;
2016-11-16 21:39:50 +03:00
	comment = strchr(s, arch->objdump.comment_char);
2014-08-15 01:03:00 +04:00
	if (comment != NULL)
		s = comment - 1;
	else
		s = strchr(s, '\0') - 1;
2012-05-11 23:48:49 +04:00
2014-08-15 01:03:00 +04:00
	while (s > target && isspace(s[0]))
		--s;
	s++;
2012-05-11 23:48:49 +04:00
	prev = *s;
	*s = '\0';
	ops->target.raw = strdup(target);
	*s = prev;

	if (ops->target.raw == NULL)
		goto out_free_source;

	if (comment == NULL)
		return 0;
2019-06-26 17:42:03 +03:00
	comment = skip_spaces(comment);
2017-11-28 10:56:32 +03:00
	comment__symbol(ops->source.raw, comment + 1, &ops->source.addr, &ops->source.name);
	comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
2012-05-11 23:48:49 +04:00
	return 0;

out_free_source:
2013-12-27 00:41:15 +04:00
	zfree(&ops->source.raw);
2012-05-11 23:48:49 +04:00
	return -1;
}
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
2019-03-06 22:40:15 +03:00
			  struct ins_operands *ops, int max_ins_name)
2012-05-11 23:48:49 +04:00
{
2019-03-06 22:40:15 +03:00
	return scnprintf(bf, size, "%-*s %s,%s", max_ins_name, ins->name,
2012-05-11 23:48:49 +04:00
			 ops->source.name ?: ops->source.raw,
			 ops->target.name ?: ops->target.raw);
}

static struct ins_ops mov_ops = {
	.parse	   = mov__parse,
	.scnprintf = mov__scnprintf,
};
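A standalone sketch of mov__parse()'s splitting rule: the first comma divides source from target, and the target is trimmed back from the objdump comment character. The buffer sizes and the '#' comment character here are assumptions made for the demo, not perf's arch tables:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char raw[] = "%edi,%eax        # some comment";
	char *s = strchr(raw, ',');		/* first comma splits the operands */
	char *comment = strchr(raw, '#');	/* '#' assumed as comment char */
	char source[32], target[32];
	int tlen;

	if (s == NULL)
		return 1;

	/* Source is everything before the comma. */
	snprintf(source, sizeof(source), "%.*s", (int)(s - raw), raw);

	/* Target runs from after the comma to the comment (or end),
	 * minus trailing spaces. */
	tlen = comment ? (int)(comment - (s + 1)) : (int)strlen(s + 1);
	while (tlen > 0 && *(s + tlen) == ' ')
		tlen--;
	snprintf(target, sizeof(target), "%.*s", tlen, s + 1);

	printf("source=%s target=%s\n", source, target); /* source=%edi target=%eax */
	return 0;
}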
2018-03-20 22:19:08 +03:00
static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
2012-05-12 00:21:09 +04:00
{
	char *target, *comment, *s, prev;

	target = s = ops->raw;

	while (s[0] != '\0' && !isspace(s[0]))
		++s;
	prev = *s;
	*s = '\0';

	ops->target.raw = strdup(target);
	*s = prev;

	if (ops->target.raw == NULL)
		return -1;
2016-11-30 18:23:33 +03:00
	comment = strchr(s, arch->objdump.comment_char);
2012-05-12 00:21:09 +04:00
	if (comment == NULL)
		return 0;
2019-06-26 17:42:03 +03:00
	comment = skip_spaces(comment);
2017-11-28 10:56:32 +03:00
	comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
2012-05-12 00:21:09 +04:00
	return 0;
}

static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
2019-03-06 22:40:15 +03:00
			  struct ins_operands *ops, int max_ins_name)
2012-05-12 00:21:09 +04:00
{
2019-03-06 22:40:15 +03:00
	return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name,
2012-05-12 00:21:09 +04:00
			 ops->target.name ?: ops->target.raw);
}

static struct ins_ops dec_ops = {
	.parse	   = dec__parse,
	.scnprintf = dec__scnprintf,
};
2012-09-11 02:15:03 +04:00
static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
2019-03-06 22:40:15 +03:00
			  struct ins_operands *ops __maybe_unused, int max_ins_name)
2012-05-08 01:57:02 +04:00
{
2019-03-06 22:40:15 +03:00
	return scnprintf(bf, size, "%-*s", max_ins_name, "nop");
2012-05-08 01:57:02 +04:00
}

static struct ins_ops nop_ops = {
	.scnprintf = nop__scnprintf,
};
2016-06-24 14:53:58 +03:00
static struct ins_ops ret_ops = {
	.scnprintf = ins__raw_scnprintf,
};

bool ins__is_ret(const struct ins *ins)
{
	return ins->ops == &ret_ops;
}
2017-07-07 08:06:35 +03:00
bool ins__is_lock(const struct ins *ins)
{
	return ins->ops == &lock_ops;
}
2016-05-19 19:59:46 +03:00
static int ins__key_cmp(const void *name, const void *insp)
2012-04-18 20:58:34 +04:00
{
	const struct ins *ins = insp;

	return strcmp(name, ins->name);
}
2016-05-19 19:59:46 +03:00
static int ins__cmp(const void *a, const void *b)
{
	const struct ins *ia = a;
	const struct ins *ib = b;

	return strcmp(ia->name, ib->name);
}
2016-11-17 18:31:51 +03:00
static void ins__sort(struct arch *arch)
2016-05-19 19:59:46 +03:00
{
2016-11-17 18:31:51 +03:00
	const int nmemb = arch->nr_instructions;
2016-05-19 19:59:46 +03:00
2016-11-17 18:31:51 +03:00
	qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp);
2016-05-19 19:59:46 +03:00
}
2016-11-24 17:37:08 +03:00
static struct ins_ops *__ins__find(struct arch *arch, const char *name)
2012-04-18 20:58:34 +04:00
{
2016-11-24 17:16:06 +03:00
	struct ins *ins;
2016-11-17 18:31:51 +03:00
	const int nmemb = arch->nr_instructions;
2016-05-19 19:59:46 +03:00
2016-11-17 18:31:51 +03:00
	if (!arch->sorted_instructions) {
		ins__sort(arch);
		arch->sorted_instructions = true;
2016-05-19 19:59:46 +03:00
	}
2012-04-18 20:58:34 +04:00
2016-11-24 17:16:06 +03:00
	ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
	return ins ? ins->ops : NULL;
2012-04-18 20:58:34 +04:00
}
2016-11-24 17:37:08 +03:00
static struct ins_ops *ins__find(struct arch *arch, const char *name)
{
	struct ins_ops *ops = __ins__find(arch, name);

	if (!ops && arch->associate_instruction_ops)
		ops = arch->associate_instruction_ops(arch, name);

	return ops;
}
2016-11-16 21:39:50 +03:00
static int arch__key_cmp(const void *name, const void *archp)
{
	const struct arch *arch = archp;

	return strcmp(name, arch->name);
}

static int arch__cmp(const void *a, const void *b)
{
	const struct arch *aa = a;
	const struct arch *ab = b;

	return strcmp(aa->name, ab->name);
}

static void arch__sort(void)
{
	const int nmemb = ARRAY_SIZE(architectures);

	qsort(architectures, nmemb, sizeof(struct arch), arch__cmp);
}

static struct arch *arch__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(architectures);
	static bool sorted;

	if (!sorted) {
		arch__sort();
		sorted = true;
	}

	return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
}
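The instruction and arch tables above are sorted lazily on first lookup and then searched with bsearch(). A minimal sketch of the same pattern with a toy table (the entry names are sample data; the real tables live in arch-specific instruction arrays):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	const char *name;
};

static struct entry table[] = {
	{ "mov" }, { "jmp" }, { "add" }, { "call" },
};

static bool sorted;

static int entry__key_cmp(const void *name, const void *ep)
{
	return strcmp(name, ((const struct entry *)ep)->name);
}

static int entry__cmp(const void *a, const void *b)
{
	return strcmp(((const struct entry *)a)->name,
		      ((const struct entry *)b)->name);
}

static struct entry *entry__find(const char *name)
{
	const int nmemb = sizeof(table) / sizeof(table[0]);

	if (!sorted) {		/* sort once, on first use */
		qsort(table, nmemb, sizeof(struct entry), entry__cmp);
		sorted = true;
	}
	return bsearch(name, table, nmemb, sizeof(struct entry), entry__key_cmp);
}

int main(void)
{
	struct entry *e = entry__find("jmp");

	printf("%s\n", e ? e->name : "not found");
	return 0;
}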
2018-05-24 22:01:31 +03:00
static struct annotated_source *annotated_source__new(void)
{
	struct annotated_source *src = zalloc(sizeof(*src));

	if (src != NULL)
		INIT_LIST_HEAD(&src->source);

	return src;
}
2018-05-24 23:33:18 +03:00
static __maybe_unused void annotated_source__delete(struct annotated_source *src)
2018-05-24 22:01:31 +03:00
{
	if (src == NULL)
		return;
	zfree(&src->histograms);
	zfree(&src->cycles_hist);
	free(src);
}
2018-05-24 22:23:08 +03:00
static int annotated_source__alloc_histograms(struct annotated_source *src,
					      size_t size, int nr_hists)
2011-02-08 18:27:39 +03:00
{
2012-07-20 07:05:25 +04:00
	size_t sizeof_sym_hist;
2017-10-24 17:20:06 +03:00
	/*
	 * Add buffer of one element for zero length symbol.
	 * When sample is taken from first instruction of
	 * zero length symbol, perf still resolves it and
	 * shows symbol name in perf report and allows to
	 * annotate it.
	 */
	if (size == 0)
		size = 1;
2012-07-20 07:05:25 +04:00
	/* Check for overflow when calculating sizeof_sym_hist */
2017-07-20 00:36:45 +03:00
	if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(struct sym_hist_entry))
2012-07-20 07:05:25 +04:00
		return -1;
2017-07-20 00:36:45 +03:00
	sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(struct sym_hist_entry));
2012-07-20 07:05:25 +04:00
	/* Check for overflow in zalloc argument */
2018-05-24 22:23:08 +03:00
	if (sizeof_sym_hist > SIZE_MAX / nr_hists)
2012-07-20 07:05:25 +04:00
		return -1;
2011-02-08 18:27:39 +03:00
2018-05-24 22:23:08 +03:00
	src->sizeof_sym_hist = sizeof_sym_hist;
	src->nr_histograms   = nr_hists;
	src->histograms	     = calloc(nr_hists, sizeof_sym_hist);
	return src->histograms ? 0 : -1;
}
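The two SIZE_MAX guards above keep the size arithmetic from wrapping before calloc() ever sees it. The same pattern in isolation, with stand-in struct layouts (not perf's sym_hist types):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct hdr  { uint64_t nr_samples; };
struct slot { uint64_t nr_samples, period; };

static void *alloc_hists(size_t nr_slots, size_t nr_hists)
{
	size_t sizeof_hist;

	/* Would sizeof(hdr) + nr_slots * sizeof(slot) overflow size_t? */
	if (nr_slots > (SIZE_MAX - sizeof(struct hdr)) / sizeof(struct slot))
		return NULL;

	sizeof_hist = sizeof(struct hdr) + nr_slots * sizeof(struct slot);

	/* Would nr_hists * sizeof_hist overflow calloc's total? */
	if (sizeof_hist > SIZE_MAX / nr_hists)
		return NULL;

	return calloc(nr_hists, sizeof_hist);
}

int main(void)
{
	void *p = alloc_hists(4096, 8);

	printf("%s\n", p ? "allocated" : "rejected");
	free(p);
	return 0;
}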
2015-07-18 18:24:48 +03:00
/* The cycles histogram is lazily allocated. */
static int symbol__alloc_hist_cycles(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);
	const size_t size = symbol__size(sym);

	notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist));
	if (notes->src->cycles_hist == NULL)
		return -1;
	return 0;
}
2011-02-06 19:54:44 +03:00
void symbol__annotate_zero_histograms(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);
2011-02-08 18:27:39 +03:00
	pthread_mutex_lock(&notes->lock);
2015-07-18 18:24:48 +03:00
	if (notes->src != NULL) {
2011-02-08 18:27:39 +03:00
		memset(notes->src->histograms, 0,
		       notes->src->nr_histograms * notes->src->sizeof_sym_hist);
2015-07-18 18:24:48 +03:00
		if (notes->src->cycles_hist)
			memset(notes->src->cycles_hist, 0,
			       symbol__size(sym) * sizeof(struct cyc_hist));
	}
2011-02-08 18:27:39 +03:00
	pthread_mutex_unlock(&notes->lock);
2011-02-06 19:54:44 +03:00
}
2018-05-24 21:20:18 +03:00
static int __symbol__account_cycles(struct cyc_hist *ch,
2015-07-18 18:24:48 +03:00
				    u64 start,
				    unsigned offset, unsigned cycles,
				    unsigned have_start)
{
	/*
	 * For now we can only account one basic block per
	 * final jump. But multiple could be overlapping.
	 * Always account the longest one. So when
	 * a shorter one has been already seen throw it away.
	 *
	 * We separately always account the full cycles.
	 */
	ch[offset].num_aggr++;
	ch[offset].cycles_aggr += cycles;
2018-05-17 17:58:37 +03:00
	if (cycles > ch[offset].cycles_max)
		ch[offset].cycles_max = cycles;

	if (ch[offset].cycles_min) {
		if (cycles && cycles < ch[offset].cycles_min)
			ch[offset].cycles_min = cycles;
	} else
		ch[offset].cycles_min = cycles;
2015-07-18 18:24:48 +03:00
	if (!have_start && ch[offset].have_start)
		return 0;

	if (ch[offset].num) {
		if (have_start && (!ch[offset].have_start ||
				   ch[offset].start > start)) {
			ch[offset].have_start = 0;
			ch[offset].cycles = 0;
			ch[offset].num = 0;
			if (ch[offset].reset < 0xffff)
				ch[offset].reset++;
		} else if (have_start &&
			   ch[offset].start < start)
			return 0;
	}
2019-09-25 04:14:46 +03:00
	if (ch[offset].num < NUM_SPARKS)
		ch[offset].cycles_spark[ch[offset].num] = cycles;
2015-07-18 18:24:48 +03:00
	ch[offset].have_start = have_start;
	ch[offset].start = start;
	ch[offset].cycles += cycles;
	ch[offset].num++;
	return 0;
}
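A toy model of the "longest basic block wins" rule above: with a common end offset, an earlier start address means a longer block, so it resets a previously recorded shorter one, and a later, shorter block is simply dropped. The field names only loosely follow the cyc_hist entry; this is illustration, not the real accounting:

#include <stdint.h>
#include <stdio.h>

struct toy_hist {
	uint64_t start;
	unsigned num;
};

static void account(struct toy_hist *h, uint64_t start)
{
	if (h->num && start > h->start)
		return;		/* shorter block already covered: drop it */
	if (h->num && start < h->start)
		h->num = 0;	/* longer block found: restart the count */
	h->start = start;
	h->num++;
}

int main(void)
{
	struct toy_hist h = { 0, 0 };

	account(&h, 0x40);	/* first block, starts at 0x40 */
	account(&h, 0x20);	/* longer block wins, count restarts */
	account(&h, 0x50);	/* shorter block, ignored */
	printf("start=%#llx num=%u\n", (unsigned long long)h.start, h.num);
	return 0;
}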
2019-11-04 21:57:38 +03:00
static int __symbol__inc_addr_samples(struct map_symbol *ms,
2018-05-24 22:28:29 +03:00
				      struct annotated_source *src, int evidx, u64 addr,
2017-07-20 23:18:05 +03:00
				      struct perf_sample *sample)
2011-02-04 14:45:46 +03:00
{
2019-11-04 21:57:38 +03:00
	struct symbol *sym = ms->sym;
2011-02-04 18:43:24 +03:00
	unsigned offset;
2011-02-04 14:45:46 +03:00
	struct sym_hist *h;
2019-11-04 21:57:38 +03:00
	pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, ms->map->unmap_ip(ms->map, addr));
2011-02-04 14:45:46 +03:00
2016-11-22 11:40:50 +03:00
	if ((addr < sym->start || addr >= sym->end) &&
	    (addr != sym->end || sym->start != sym->end)) {
2015-10-21 21:45:13 +03:00
		pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
			 __func__, __LINE__, sym->name, sym->start, addr, sym->end);
2012-03-27 19:55:57 +04:00
		return -ERANGE;
2015-10-21 21:45:13 +03:00
	}
2011-02-04 14:45:46 +03:00
2011-02-04 18:43:24 +03:00
	offset = addr - sym->start;
2018-05-24 22:28:29 +03:00
	h = annotated_source__histogram(src, evidx);
2018-06-05 22:31:21 +03:00
	if (h == NULL) {
		pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n",
			 __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
		return -ENOMEM;
	}
2017-07-20 00:36:51 +03:00
	h->nr_samples++;
2017-07-20 00:36:45 +03:00
	h->addr[offset].nr_samples++;
2017-07-20 23:18:05 +03:00
	h->period += sample->period;
	h->addr[offset].period += sample->period;
2011-02-04 14:45:46 +03:00
	pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
2017-07-20 23:18:05 +03:00
		  ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
		  sym->start, sym->name, addr, addr - sym->start, evidx,
		  h->addr[offset].nr_samples, h->addr[offset].period);
2011-02-04 14:45:46 +03:00
	return 0;
}
2018-05-24 23:17:05 +03:00
static struct cyc_hist *symbol__cycles_hist(struct symbol *sym)
2015-05-27 20:51:46 +03:00
{
	struct annotation *notes = symbol__annotation(sym);

	if (notes->src == NULL) {
2018-05-24 23:17:05 +03:00
		notes->src = annotated_source__new();
		if (notes->src == NULL)
2015-05-27 20:51:46 +03:00
			return NULL;
2018-05-24 23:17:05 +03:00
		goto alloc_cycles_hist;
2015-05-27 20:51:46 +03:00
	}
2018-05-24 23:17:05 +03:00
	if (!notes->src->cycles_hist) {
alloc_cycles_hist:
		symbol__alloc_hist_cycles(sym);
2015-07-18 18:24:48 +03:00
	}
2018-05-24 23:17:05 +03:00
	return notes->src->cycles_hist;
2015-05-27 20:51:46 +03:00
}
2018-05-24 23:33:18 +03:00
struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
2018-05-24 22:37:53 +03:00
{
	struct annotation *notes = symbol__annotation(sym);

	if (notes->src == NULL) {
		notes->src = annotated_source__new();
		if (notes->src == NULL)
			return NULL;
		goto alloc_histograms;
	}

	if (notes->src->histograms == NULL) {
alloc_histograms:
		annotated_source__alloc_histograms(notes->src, symbol__size(sym),
2018-05-24 23:20:53 +03:00
						   nr_hists);
2018-05-24 22:37:53 +03:00
	}

	return notes->src;
}
2019-11-04 21:57:38 +03:00
static int symbol__inc_addr_samples(struct map_symbol *ms,
2019-07-21 14:23:51 +03:00
				    struct evsel *evsel, u64 addr,
2017-07-20 22:28:53 +03:00
				    struct perf_sample *sample)
2013-12-18 22:37:41 +04:00
{
2019-11-04 21:57:38 +03:00
	struct symbol *sym = ms->sym;
2018-05-24 22:37:53 +03:00
	struct annotated_source *src;
2013-12-18 22:37:41 +04:00
2014-02-20 05:32:53 +04:00
	if (sym == NULL)
2013-12-18 22:37:41 +04:00
		return 0;
2019-07-21 14:24:28 +03:00
	src = symbol__hists(sym, evsel->evlist->core.nr_entries);
2021-07-06 18:16:59 +03:00
	return src ? __symbol__inc_addr_samples(ms, src, evsel->core.idx, addr, sample) : 0;
2013-12-18 22:37:41 +04:00
}
2015-07-18 18:24:48 +03:00
static int symbol__account_cycles(u64 addr, u64 start,
				  struct symbol *sym, unsigned cycles)
{
2018-05-24 23:17:05 +03:00
	struct cyc_hist *cycles_hist;
2015-07-18 18:24:48 +03:00
	unsigned offset;

	if (sym == NULL)
		return 0;
2018-05-24 23:17:05 +03:00
	cycles_hist = symbol__cycles_hist(sym);
	if (cycles_hist == NULL)
2015-07-18 18:24:48 +03:00
		return -ENOMEM;
	if (addr < sym->start || addr >= sym->end)
		return -ERANGE;

	if (start) {
		if (start < sym->start || start >= sym->end)
			return -ERANGE;
		if (start >= addr)
			start = 0;
	}
	offset = addr - sym->start;
2018-05-24 23:17:05 +03:00
	return __symbol__account_cycles(cycles_hist,
2015-07-18 18:24:48 +03:00
					start ? start - sym->start : 0,
					offset, cycles,
					!!start);
}
int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
				    struct addr_map_symbol *start,
				    unsigned cycles)
{
2015-08-14 10:11:34 +03:00
	u64 saddr = 0;
2015-07-18 18:24:48 +03:00
	int err;

	if (!cycles)
		return 0;

	/*
	 * Only set start when IPC can be computed. We can only
	 * compute it when the basic block is completely in a single
	 * function.
	 * Special case the case when the jump is elsewhere, but
	 * it starts on the function start.
	 */
	if (start &&
2019-11-04 21:57:38 +03:00
	    (start->ms.sym == ams->ms.sym ||
	     (ams->ms.sym &&
	      start->addr == ams->ms.sym->start + ams->ms.map->start)))
2015-07-18 18:24:48 +03:00
		saddr = start->al_addr;
	if (saddr == 0)
2015-08-14 10:11:34 +03:00
		pr_debug2("BB with bad start: addr %" PRIx64 " start %" PRIx64 " sym %" PRIx64 " saddr %" PRIx64 "\n",
2015-07-18 18:24:48 +03:00
			  ams->addr,
			  start ? start->addr : 0,
2019-11-04 21:57:38 +03:00
			  ams->ms.sym ? ams->ms.sym->start + ams->ms.map->start : 0,
2015-07-18 18:24:48 +03:00
			  saddr);
2019-11-04 21:57:38 +03:00
	err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles);
2015-07-18 18:24:48 +03:00
	if (err)
		pr_debug2("account_cycles failed %d\n", err);
	return err;
}
2018-03-15 17:46:23 +03:00
static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end)
{
	unsigned n_insn = 0;
	u64 offset;

	for (offset = start; offset <= end; offset++) {
		if (notes->offsets[offset])
			n_insn++;
	}
	return n_insn;
}
static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
{
	unsigned n_insn;
perf annotate: Compute average IPC and IPC coverage per symbol
Add support to 'perf report' annotate view or 'perf annotate --stdio2'
to aggregate the IPC derived from timed LBRs per symbol. We compute the
average IPC and the IPC coverage percentage.
For example:
$ perf annotate --stdio2
Percent IPC Cycle (Average IPC: 2.30, IPC Coverage: 54.8%)
Disassembly of section .text:
000000000003aac0 <random@@GLIBC_2.2.5>:
8.32 3.28 sub $0x18,%rsp
3.28 mov $0x1,%esi
3.28 xor %eax,%eax
3.28 cmpl $0x0,argp_program_version_hook@@GLIBC_2.2.5+0x1e0
11.57 3.28 1 ↓ je 20
lock cmpxchg %esi,__abort_msg@@GLIBC_PRIVATE+0x8a0
↓ jne 29
↓ jmp 43
11.57 1.10 20: cmpxchg %esi,__abort_msg@@GLIBC_PRIVATE+0x8a0
0.00 1.10 1 ↓ je 43
29: lea __abort_msg@@GLIBC_PRIVATE+0x8a0,%rdi
sub $0x80,%rsp
→ callq __lll_lock_wait_private
add $0x80,%rsp
0.00 3.00 43: lea __ctype_b@GLIBC_2.2.5+0x38,%rdi
3.00 lea 0xc(%rsp),%rsi
8.49 3.00 1 → callq __random_r
7.91 1.94 cmpl $0x0,argp_program_version_hook@@GLIBC_2.2.5+0x1e0
0.00 1.94 1 ↓ je 68
lock decl __abort_msg@@GLIBC_PRIVATE+0x8a0
↓ jne 70
↓ jmp 8a
0.00 2.00 68: decl __abort_msg@@GLIBC_PRIVATE+0x8a0
21.56 2.00 1 ↓ je 8a
70: lea __abort_msg@@GLIBC_PRIVATE+0x8a0,%rdi
sub $0x80,%rsp
→ callq __lll_unlock_wake_private
add $0x80,%rsp
21.56 2.90 8a: movslq 0xc(%rsp),%rax
2.90 add $0x18,%rsp
9.03 2.90 1 ← retq
It shows for this symbol the average IPC is 2.30 and the IPC coverage is
54.8%.
Signed-off-by: Jin Yao <yao.jin@linux.intel.com>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1543586097-27632-2-git-send-email-yao.jin@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-11-30 16:54:54 +03:00
	unsigned int cover_insn = 0;
2018-03-15 17:46:23 +03:00
	u64 offset;

	n_insn = annotation__count_insn(notes, start, end);
	if (n_insn && ch->num && ch->cycles) {
		float ipc = n_insn / ((double)ch->cycles / (double)ch->num);

		/* Hide data when there are too many overlaps. */
2019-03-16 00:16:17 +03:00
		if (ch->reset >= 0x7fff)
2018-03-15 17:46:23 +03:00
			return;

		for (offset = start; offset <= end; offset++) {
			struct annotation_line *al = notes->offsets[offset];
2018-11-30 16:54:54 +03:00
			if (al && al->ipc == 0.0) {
2018-03-15 17:46:23 +03:00
				al->ipc = ipc;
2018-11-30 16:54:54 +03:00
				cover_insn++;
			}
		}

		if (cover_insn) {
			notes->hit_cycles += ch->cycles;
			notes->hit_insn += n_insn * ch->num;
			notes->cover_insn += cover_insn;
2018-03-15 17:46:23 +03:00
		}
	}
}
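The per-block IPC filled in above is n_insn / (cycles / num), where "cycles" accumulates over "num" traversals of the block. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned n_insn = 6;		/* instructions in the block */
	unsigned long long cycles = 40;	/* total cycles over all hits */
	unsigned long long num = 20;	/* times the block was hit */
	float ipc = n_insn / ((double)cycles / (double)num);

	/* 40 cycles / 20 hits = 2 cycles per traversal; 6 insns / 2 = 3.00 IPC */
	printf("IPC = %.2f\n", ipc);
	return 0;
}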
void annotation__compute_ipc(struct annotation *notes, size_t size)
{
2018-11-30 16:54:54 +03:00
	s64 offset;
2018-03-15 17:46:23 +03:00
	if (!notes->src || !notes->src->cycles_hist)
		return;
2018-11-30 16:54:54 +03:00
	notes->total_insn = annotation__count_insn(notes, 0, size - 1);
	notes->hit_cycles = 0;
	notes->hit_insn = 0;
	notes->cover_insn = 0;
2018-03-15 17:46:23 +03:00
	pthread_mutex_lock(&notes->lock);
2018-11-30 16:54:54 +03:00
	for (offset = size - 1; offset >= 0; --offset) {
2018-03-15 17:46:23 +03:00
		struct cyc_hist *ch;

		ch = &notes->src->cycles_hist[offset];
		if (ch && ch->cycles) {
			struct annotation_line *al;

			if (ch->have_start)
				annotation__count_and_fill(notes, ch->start, offset, ch);
			al = notes->offsets[offset];
2018-05-17 17:58:37 +03:00
			if (al && ch->num_aggr) {
2018-03-15 17:46:23 +03:00
				al->cycles = ch->cycles_aggr / ch->num_aggr;
2018-05-17 17:58:37 +03:00
				al->cycles_max = ch->cycles_max;
				al->cycles_min = ch->cycles_min;
			}
2018-03-15 17:46:23 +03:00
			notes->have_cycles = true;
		}
	}
	pthread_mutex_unlock(&notes->lock);
}
2017-07-20 22:28:53 +03:00
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
2019-07-21 14:23:51 +03:00
				 struct evsel *evsel)
2013-12-18 23:48:29 +04:00
{
2019-11-04 21:57:38 +03:00
	return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample);
2013-12-18 23:48:29 +04:00
}
2017-07-20 22:28:53 +03:00
int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
2019-07-21 14:23:51 +03:00
				 struct evsel *evsel, u64 ip)
2013-12-19 00:10:15 +04:00
{
2019-11-04 21:57:38 +03:00
	return symbol__inc_addr_samples(&he->ms, evsel, ip, sample);
2013-12-19 00:10:15 +04:00
}
2018-03-20 22:19:08 +03:00
static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map_symbol *ms)
2012-04-18 20:58:34 +04:00
{
2016-11-24 17:16:06 +03:00
	dl->ins.ops = ins__find(arch, dl->ins.name);
2012-04-18 20:58:34 +04:00
2016-11-24 17:16:06 +03:00
	if (!dl->ins.ops)
2012-04-18 20:58:34 +04:00
		return;
2018-03-20 22:19:08 +03:00
	if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, ms) < 0)
2016-11-24 17:16:06 +03:00
		dl->ins.ops = NULL;
2012-04-18 20:58:34 +04:00
}
2016-11-24 17:16:06 +03:00
static int disasm_line__parse(char *line, const char **namep, char **rawp)
2012-05-12 20:15:34 +04:00
{
2019-06-26 17:42:03 +03:00
	char tmp, *name = skip_spaces(line);
2012-05-12 20:15:34 +04:00
	if (name[0] == '\0')
		return -1;

	*rawp = name + 1;

	while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
		++*rawp;

	tmp = (*rawp)[0];
	(*rawp)[0] = '\0';
	*namep = strdup(name);

	if (*namep == NULL)
perf annotate: Fix dereferencing freed memory found by the smatch tool
Based on the following report from Smatch, fix the potential
dereferencing freed memory check.
tools/perf/util/annotate.c:1125
disasm_line__parse() error: dereferencing freed memory 'namep'
tools/perf/util/annotate.c
1100 static int disasm_line__parse(char *line, const char **namep, char **rawp)
1101 {
1102 char tmp, *name = ltrim(line);
[...]
1114 *namep = strdup(name);
1115
1116 if (*namep == NULL)
1117 goto out_free_name;
[...]
1124 out_free_name:
1125 free((void *)namep);
^^^^^
1126 *namep = NULL;
^^^^^^
1127 return -1;
1128 }
If strdup() fails to allocate memory for *namep, we must not free through
the pointer 'namep', which points at disasm_line::ins::name inside the
containing structure; and *namep is already a NULL pointer on this failure,
so it's pointless to assign NULL to *namep again.
Committer note:
Freeing namep, which is the address of the first entry of the 'struct
ins' that is the first member of struct disasm_line, would in fact free
that disasm_line instance, if it was allocated via malloc/calloc, which,
later, would be a dereference of freed memory.
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Alexios Zavras <alexios.zavras@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Changbin Du <changbin.du@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Eric Saint-Etienne <eric.saint.etienne@oracle.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Song Liu <songliubraving@fb.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20190702103420.27540-5-leo.yan@linaro.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-07-02 13:34:13 +03:00
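The committer note rests on a basic C layout fact: the address of a struct's
first member (and of that member's first field) equals the address of the
struct itself, so free((void *)namep) would release the whole disasm_line.
A minimal standalone sketch, with simplified struct definitions mirroring
the ones above:

	#include <stdio.h>
	#include <stdlib.h>

	struct ins { const char *name; void *ops; };
	struct line { struct ins ins; char *raw; };	/* 'ins' is the first member */

	int main(void)
	{
		struct line *dl = calloc(1, sizeof(*dl));
		const char **namep = &dl->ins.name;

		/* offsetof(struct line, ins) == 0, so the two addresses coincide */
		printf("%d\n", (void *)namep == (void *)dl);	/* prints 1 */

		free((void *)namep);	/* releases the whole 'dl' allocation... */
		/* ...so any later access through 'dl' is a use of freed memory */
		return 0;
	}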
		goto out;
2012-05-12 20:15:34 +04:00

	(*rawp)[0] = tmp;
2019-08-06 17:24:09 +03:00
	*rawp = strim(*rawp);
2012-05-12 20:15:34 +04:00

	return 0;
2019-07-02 13:34:13 +03:00
out:
2012-05-12 20:15:34 +04:00
	return -1;
}
2017-10-11 18:01:29 +03:00
struct annotate_args {
2020-02-04 07:52:30 +03:00
	struct arch		  *arch;
	struct map_symbol	  ms;
	struct evsel		  *evsel;
2018-05-28 17:27:40 +03:00
	struct annotation_options *options;
2020-02-04 07:52:30 +03:00
	s64			  offset;
	char			  *line;
	int			  line_nr;
2021-02-15 14:34:46 +03:00
	char			  *fileloc;
2017-10-11 18:01:29 +03:00
};
2020-02-04 07:52:29 +03:00
static void annotation_line__init(struct annotation_line *al,
				  struct annotate_args *args,
				  int nr)
2017-10-11 18:01:37 +03:00
{
2020-02-04 07:52:29 +03:00
	al->offset = args->offset;
	al->line = strdup(args->line);
	al->line_nr = args->line_nr;
2021-02-15 14:34:46 +03:00
	al->fileloc = args->fileloc;
2020-02-04 07:52:29 +03:00
	al->data_nr = nr;
}
2017-10-11 18:01:37 +03:00

2020-02-04 07:52:29 +03:00
static void annotation_line__exit(struct annotation_line *al)
{
2017-10-11 18:01:41 +03:00
	free_srcline(al->path);
2017-10-11 18:01:37 +03:00
	zfree(&al->line);
}
2020-02-04 07:52:29 +03:00
static size_t disasm_line_size(int nr)
2017-10-11 18:01:37 +03:00
{
	struct annotation_line *al;

2020-02-04 07:52:29 +03:00
	return (sizeof(struct disasm_line) + (sizeof(al->data[0]) * nr));
2017-10-11 18:01:37 +03:00
}
/*
 * Allocating the disasm annotation line data with
 * following structure:
 *
2020-02-04 07:52:29 +03:00
 *    -------------------------------------------
 *    struct disasm_line | struct annotation_line
 *    -------------------------------------------
2017-10-11 18:01:37 +03:00
 *
 * We have 'struct annotation_line' member as last member
 * of 'struct disasm_line' to have an easy access.
 */
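Because the annotation_line sits at a fixed offset inside the disasm_line,
code holding only the annotation_line can recover its container, as
annotation_line__print() further below does. A minimal sketch of that round
trip with a kernel-style container_of() and simplified fields:

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct annotation_line { long long offset; /* ...flexible data[] tail... */ };
	struct disasm_line { void *ops; struct annotation_line al; /* last member */ };

	static struct disasm_line *dl_of(struct annotation_line *al)
	{
		/* step back from the embedded member to the enclosing struct */
		return container_of(al, struct disasm_line, al);
	}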
2017-10-11 18:01:32 +03:00
static struct disasm_line *disasm_line__new(struct annotate_args *args)
2011-02-04 14:45:46 +03:00
{
2017-10-11 18:01:37 +03:00
	struct disasm_line *dl = NULL;
2020-02-04 07:52:29 +03:00
	int nr = 1;
2011-02-04 14:45:46 +03:00

2020-04-30 16:51:16 +03:00
	if (evsel__is_group_event(args->evsel))
2020-02-04 07:52:29 +03:00
		nr = args->evsel->core.nr_members;
2017-10-11 18:01:26 +03:00

2020-02-04 07:52:29 +03:00
	dl = zalloc(disasm_line_size(nr));
	if (!dl)
		return NULL;
2012-04-15 22:52:18 +04:00

2020-02-04 07:52:29 +03:00
	annotation_line__init(&dl->al, args, nr);
	if (dl->al.line == NULL)
		goto out_delete;
2012-04-15 22:52:18 +04:00

2020-02-04 07:52:29 +03:00
	if (args->offset != -1) {
		if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0)
			goto out_free_line;

		disasm_line__init_ins(dl, args->arch, &args->ms);
2011-02-04 14:45:46 +03:00
	}

2012-04-15 22:24:39 +04:00
	return dl;

2012-04-15 22:52:18 +04:00
out_free_line:
2017-10-11 18:01:26 +03:00
	zfree(&dl->al.line);
2012-04-02 19:59:01 +04:00
out_delete:
2012-04-15 22:24:39 +04:00
	free(dl);
2012-04-02 19:59:01 +04:00
	return NULL;
2011-02-04 14:45:46 +03:00
}
2012-04-15 22:24:39 +04:00
void disasm_line__free(struct disasm_line *dl)
2011-02-04 14:45:46 +03:00
{
2016-11-24 17:16:06 +03:00
	if (dl->ins.ops && dl->ins.ops->free)
		dl->ins.ops->free(&dl->ops);
2012-05-12 20:26:20 +04:00
	else
		ins__delete(&dl->ops);
2019-07-04 18:06:20 +03:00
	zfree(&dl->ins.name);
2020-02-04 07:52:29 +03:00
	annotation_line__exit(&dl->al);
	free(dl);
2011-02-04 14:45:46 +03:00
}
perf annotate: Calculate the max instruction name, align column to that
We were hardcoding '6' as the max instruction name width, and we have lots
of instructions longer than that; see the diff of two 'P'-printed TUI
annotations for a libc function that uses instructions with long names,
such as 'vpmovmskb' with its 9 chars:
--- __strcmp_avx2.annotation.before 2019-03-06 16:31:39.368020425 -0300
+++ __strcmp_avx2.annotation 2019-03-06 16:32:12.079450508 -0300
@@ -2,284 +2,284 @@
Event: cycles:ppp
Percent endbr64
- 0.10 mov %edi,%eax
+ 0.10 mov %edi,%eax
- xor %edx,%edx
+ xor %edx,%edx
- 3.54 vpxor %ymm7,%ymm7,%ymm7
+ 3.54 vpxor %ymm7,%ymm7,%ymm7
- or %esi,%eax
+ or %esi,%eax
- and $0xfff,%eax
+ and $0xfff,%eax
- cmp $0xf80,%eax
+ cmp $0xf80,%eax
- ↓ jg 370
+ ↓ jg 370
- 27.07 vmovdqu (%rdi),%ymm1
+ 27.07 vmovdqu (%rdi),%ymm1
- 7.97 vpcmpeqb (%rsi),%ymm1,%ymm0
+ 7.97 vpcmpeqb (%rsi),%ymm1,%ymm0
- 2.15 vpminub %ymm1,%ymm0,%ymm0
+ 2.15 vpminub %ymm1,%ymm0,%ymm0
- 4.09 vpcmpeqb %ymm7,%ymm0,%ymm0
+ 4.09 vpcmpeqb %ymm7,%ymm0,%ymm0
- 0.43 vpmovmskb %ymm0,%ecx
+ 0.43 vpmovmskb %ymm0,%ecx
- 1.53 test %ecx,%ecx
+ 1.53 test %ecx,%ecx
- ↓ je b0
+ ↓ je b0
- 5.26 tzcnt %ecx,%edx
+ 5.26 tzcnt %ecx,%edx
- 18.40 movzbl (%rdi,%rdx,1),%eax
+ 18.40 movzbl (%rdi,%rdx,1),%eax
- 7.09 movzbl (%rsi,%rdx,1),%edx
+ 7.09 movzbl (%rsi,%rdx,1),%edx
- 3.34 sub %edx,%eax
+ 3.34 sub %edx,%eax
2.37 vzeroupper
← retq
nop
- 50: tzcnt %ecx,%edx
+ 50: tzcnt %ecx,%edx
- movzbl 0x20(%rdi,%rdx,1),%eax
+ movzbl 0x20(%rdi,%rdx,1),%eax
- movzbl 0x20(%rsi,%rdx,1),%edx
+ movzbl 0x20(%rsi,%rdx,1),%edx
- sub %edx,%eax
+ sub %edx,%eax
vzeroupper
← retq
- data16 nopw %cs:0x0(%rax,%rax,1)
+ data16 nopw %cs:0x0(%rax,%rax,1)
Reported-by: Travis Downs <travis.downs@gmail.com>
LPU-Reference: CAOBGo4z1KfmWeOm6Et0cnX5Z6DWsG2PQbAvRn1MhVPJmXHrc5g@mail.gmail.com
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lkml.kernel.org/n/tip-89wsdd9h9g6bvq52sgp6d0u4@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-03-06 22:40:15 +03:00
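A minimal sketch of the width computation this commit describes, under the
assumption (the helper name is hypothetical) that one pass over the parsed
mnemonics finds the maximum, which the printer then uses as a dynamic pad
width, as disasm_line__scnprintf() below does:

	#include <string.h>

	/* hypothetical helper: widest mnemonic among the disassembled lines */
	static int max_ins_name_width(const char * const *names, int nr)
	{
		int i, max = 0;

		for (i = 0; i < nr; i++) {
			int len = (int)strlen(names[i]);

			if (len > max)
				max = len;
		}
		return max;
	}

	/* printer side: scnprintf(bf, size, "%-*s %s", max_ins_name, name, raw); */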
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name)
2012-05-08 01:54:16 +04:00
{
2016-11-24 17:16:06 +03:00
	if (raw || !dl->ins.ops)
2019-03-06 22:40:15 +03:00
		return scnprintf(bf, size, "%-*s %s", max_ins_name, dl->ins.name, dl->ops.raw);
2012-05-08 01:54:16 +04:00
2019-03-06 22:40:15 +03:00
	return ins__scnprintf(&dl->ins, bf, size, &dl->ops, max_ins_name);
2012-05-08 01:54:16 +04:00
}
2021-11-12 06:51:24 +03:00
void annotation__init(struct annotation *notes)
{
	pthread_mutex_init(&notes->lock, NULL);
}

void annotation__exit(struct annotation *notes)
{
	annotated_source__delete(notes->src);
	pthread_mutex_destroy(&notes->lock);
}
2017-10-11 18:01:35 +03:00
static void annotation_line__add(struct annotation_line *al, struct list_head *head)
2011-02-04 14:45:46 +03:00
{
2017-10-11 18:01:35 +03:00
	list_add_tail(&al->node, head);
2011-02-04 14:45:46 +03:00
}
2017-10-11 18:01:34 +03:00
struct annotation_line *
annotation_line__next(struct annotation_line *pos, struct list_head *head)
2011-02-04 14:45:46 +03:00
{
2017-10-11 18:01:34 +03:00
	list_for_each_entry_continue(pos, head, node)
		if (pos->offset >= 0)
2011-02-04 14:45:46 +03:00
			return pos;

	return NULL;
}
perf annotate: Add branch stack / basic block
I wanted to know the hottest path through a function and figured the
branch-stack (LBR) information should be able to help out with that.
The below uses the branch-stack to create basic blocks and generate
statistics from them.
from to branch_i
* ----> *
|
| block
v
* ----> *
from to branch_i+1
The blocks are broken down into non-overlapping ranges, while tracking
if the start of each range is an entry point and/or the end of a range
is a branch.
Each block iterates all ranges it covers (while splitting where required
to exactly match the block) and increments the 'coverage' count.
For the range including the branch we increment the taken counter, as
well as the pred counter if flags.predicted.
Using these numbers we can find if an instruction:
- had coverage; given by:
br->coverage / br->sym->max_coverage
This metric ensures each symbol has a 100% spot, which reflects the
observation that each symbol must have a most covered/hottest
block.
- is a branch target: br->is_target && br->start == addr
- for targets, how much of a branch's coverages comes from it:
target->entry / branch->coverage
- is a branch: br->is_branch && br->end == addr
- for branches, how often it was taken:
br->taken / br->coverage
after all, all execution that didn't take the branch would have
incremented the coverage and continued onward to a later branch.
- for branches, how often it was predicted:
br->pred / br->taken
The coverage percentage is used to color the address and asm sections;
for low (<1%) coverage we use NORMAL (uncolored), indicating that these
instructions are not 'important'. For high coverage (>75%) we color the
address RED.
For each branch, we add an asm comment after the instruction with
information on how often it was taken and predicted.
Output looks like (sans color, which does lose a lot of the
information :/)
$ perf record --branch-filter u,any -e cycles:p ./branches 27
$ perf annotate branches
Percent | Source code & Disassembly of branches for cycles:pu (217 samples)
---------------------------------------------------------------------------------
: branches():
0.00 : 40057a: push %rbp
0.00 : 40057b: mov %rsp,%rbp
0.00 : 40057e: sub $0x20,%rsp
0.00 : 400582: mov %rdi,-0x18(%rbp)
0.00 : 400586: mov %rsi,-0x20(%rbp)
0.00 : 40058a: mov -0x18(%rbp),%rax
0.00 : 40058e: mov %rax,-0x10(%rbp)
0.00 : 400592: movq $0x0,-0x8(%rbp)
0.00 : 40059a: jmpq 400656 <branches+0xdc>
1.84 : 40059f: mov -0x10(%rbp),%rax # +100.00%
3.23 : 4005a3: and $0x1,%eax
1.84 : 4005a6: test %rax,%rax
0.00 : 4005a9: je 4005bf <branches+0x45> # -54.50% (p:42.00%)
0.46 : 4005ab: mov 0x200bbe(%rip),%rax # 601170 <acc>
12.90 : 4005b2: add $0x1,%rax
2.30 : 4005b6: mov %rax,0x200bb3(%rip) # 601170 <acc>
0.46 : 4005bd: jmp 4005d1 <branches+0x57> # -100.00% (p:100.00%)
0.92 : 4005bf: mov 0x200baa(%rip),%rax # 601170 <acc> # +49.54%
13.82 : 4005c6: sub $0x1,%rax
0.46 : 4005ca: mov %rax,0x200b9f(%rip) # 601170 <acc>
2.30 : 4005d1: mov -0x10(%rbp),%rax # +50.46%
0.46 : 4005d5: mov %rax,%rdi
0.46 : 4005d8: callq 400526 <lfsr> # -100.00% (p:100.00%)
0.00 : 4005dd: mov %rax,-0x10(%rbp) # +100.00%
0.92 : 4005e1: mov -0x18(%rbp),%rax
0.00 : 4005e5: and $0x1,%eax
0.00 : 4005e8: test %rax,%rax
0.00 : 4005eb: je 4005ff <branches+0x85> # -100.00% (p:100.00%)
0.00 : 4005ed: mov 0x200b7c(%rip),%rax # 601170 <acc>
0.00 : 4005f4: shr $0x2,%rax
0.00 : 4005f8: mov %rax,0x200b71(%rip) # 601170 <acc>
0.00 : 4005ff: mov -0x10(%rbp),%rax # +100.00%
7.37 : 400603: and $0x1,%eax
3.69 : 400606: test %rax,%rax
0.00 : 400609: jne 400612 <branches+0x98> # -59.25% (p:42.99%)
1.84 : 40060b: mov $0x1,%eax
14.29 : 400610: jmp 400617 <branches+0x9d> # -100.00% (p:100.00%)
1.38 : 400612: mov $0x0,%eax # +57.65%
10.14 : 400617: test %al,%al # +42.35%
0.00 : 400619: je 40062f <branches+0xb5> # -57.65% (p:100.00%)
0.46 : 40061b: mov 0x200b4e(%rip),%rax # 601170 <acc>
2.76 : 400622: sub $0x1,%rax
0.00 : 400626: mov %rax,0x200b43(%rip) # 601170 <acc>
0.46 : 40062d: jmp 400641 <branches+0xc7> # -100.00% (p:100.00%)
0.92 : 40062f: mov 0x200b3a(%rip),%rax # 601170 <acc> # +56.13%
2.30 : 400636: add $0x1,%rax
0.92 : 40063a: mov %rax,0x200b2f(%rip) # 601170 <acc>
0.92 : 400641: mov -0x10(%rbp),%rax # +43.87%
2.30 : 400645: mov %rax,%rdi
0.00 : 400648: callq 400526 <lfsr> # -100.00% (p:100.00%)
0.00 : 40064d: mov %rax,-0x10(%rbp) # +100.00%
1.84 : 400651: addq $0x1,-0x8(%rbp)
0.92 : 400656: mov -0x8(%rbp),%rax
5.07 : 40065a: cmp -0x20(%rbp),%rax
0.00 : 40065e: jb 40059f <branches+0x25> # -100.00% (p:100.00%)
0.00 : 400664: nop
0.00 : 400665: leaveq
0.00 : 400666: retq
(Note: the --branch-filter u,any was used to avoid spurious target and
branch points due to interrupts/faults, they show up as very small -/+
annotations on 'weird' locations)
Committer note:
Please take a look at:
http://vger.kernel.org/~acme/perf/annotate_basic_blocks.png
To see the colors.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: David Carrillo-Cisneros <davidcc@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
[ Moved sym->max_coverage to 'struct annotate', aka symbol__annotate(sym) ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-09-05 22:08:12 +03:00
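A minimal sketch of the ratios described above, using a hypothetical struct
carrying just the counters the commit message names (the real counters live
in perf's struct block_range; u64 as used throughout this file):

	struct br_counters {
		u64 coverage;	/* executions that passed through the range */
		u64 taken;	/* times the ending branch was taken */
		u64 pred;	/* times that taken branch was predicted */
		u64 entry;	/* times execution entered at this target */
	};

	/* how often the ending branch was taken, in percent */
	static double taken_pct(const struct br_counters *br)
	{
		return br->coverage ? 100.0 * (double)br->taken / (double)br->coverage : 0.0;
	}

	/* how often a taken branch was correctly predicted, in percent */
	static double pred_pct(const struct br_counters *br)
	{
		return br->taken ? 100.0 * (double)br->pred / (double)br->taken : 0.0;
	}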
static const char *annotate__address_color(struct block_range *br)
{
	double cov = block_range__coverage(br);

	if (cov >= 0) {
		/* mark red for >75% coverage */
		if (cov > 0.75)
			return PERF_COLOR_RED;

		/* mark dull for <1% coverage */
		if (cov < 0.01)
			return PERF_COLOR_NORMAL;
	}

	return PERF_COLOR_MAGENTA;
}

static const char *annotate__asm_color(struct block_range *br)
{
	double cov = block_range__coverage(br);

	if (cov >= 0) {
		/* mark dull for <1% coverage */
		if (cov < 0.01)
			return PERF_COLOR_NORMAL;
	}

	return PERF_COLOR_BLUE;
}

static void annotate__branch_printf(struct block_range *br, u64 addr)
{
	bool emit_comment = true;

	if (!br)
		return;

#if 1
	if (br->is_target && br->start == addr) {
		struct block_range *branch = br;
		double p;

		/*
		 * Find matching branch to our target.
		 */
		while (!branch->is_branch)
			branch = block_range__next(branch);

		p = 100 * (double)br->entry / branch->coverage;
		if (p > 0.1) {
			if (emit_comment) {
				emit_comment = false;
				printf("\t#");
			}

			/*
			 * The percentage of coverage joined at this target in relation
			 * to the next branch.
			 */
			printf(" +%.2f%%", p);
		}
	}
#endif
	if (br->is_branch && br->end == addr) {
		double p = 100 * (double)br->taken / br->coverage;

		if (p > 0.1) {
			if (emit_comment) {
				emit_comment = false;
				printf("\t#");
			}

			/*
			 * The percentage of coverage leaving at this branch, and
			 * its prediction ratio.
			 */
			printf(" -%.2f%% (p:%.2f%%)", p, 100 * (double)br->pred / br->taken);
		}
	}
}
perf annotate: Align source and offset lines
Align source with offset lines, which are more advanced, because of the
address column.
Before:
: static void *worker_thread(void *__tdata)
: {
0.00 : 48a971: push %rbp
0.00 : 48a972: mov %rsp,%rbp
0.00 : 48a975: sub $0x30,%rsp
0.00 : 48a979: mov %rdi,-0x28(%rbp)
0.00 : 48a97d: mov %fs:0x28,%rax
0.00 : 48a986: mov %rax,-0x8(%rbp)
0.00 : 48a98a: xor %eax,%eax
: struct thread_data *td = __tdata;
0.00 : 48a98c: mov -0x28(%rbp),%rax
0.00 : 48a990: mov %rax,-0x10(%rbp)
: int m = 0, i;
0.00 : 48a994: movl $0x0,-0x1c(%rbp)
: int ret;
:
: for (i = 0; i < loops; i++) {
0.00 : 48a99b: movl $0x0,-0x18(%rbp)
After:
: static void *worker_thread(void *__tdata)
: {
0.00 : 48a971: push %rbp
0.00 : 48a972: mov %rsp,%rbp
0.00 : 48a975: sub $0x30,%rsp
0.00 : 48a979: mov %rdi,-0x28(%rbp)
0.00 : 48a97d: mov %fs:0x28,%rax
0.00 : 48a986: mov %rax,-0x8(%rbp)
0.00 : 48a98a: xor %eax,%eax
: struct thread_data *td = __tdata;
0.00 : 48a98c: mov -0x28(%rbp),%rax
0.00 : 48a990: mov %rax,-0x10(%rbp)
: int m = 0, i;
0.00 : 48a994: movl $0x0,-0x1c(%rbp)
: int ret;
:
: for (i = 0; i < loops; i++) {
0.00 : 48a99b: movl $0x0,-0x18(%rbp)
It makes a bigger difference when displaying script sources, where the
comment lines look oddly shifted from the lines which actually hold
code. I'll send script support separately.
Committer note:
Do not use a fixed column width for the addresses, as kernel ones use
more than 10 columns; look at the last offset and get the right width.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20171011150158.11895-36-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2017-10-11 18:01:58 +03:00
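A minimal sketch of the committer's width fix, assuming the column width is
measured once from the symbol's largest address and then threaded through
as addr_fmt_width (the helper name here is hypothetical):

	#include <inttypes.h>
	#include <stdio.h>

	/* hypothetical helper: hex digits needed for the largest address */
	static int addr_fmt_width(u64 start, u64 last_offset)
	{
		char bf[32];

		/* format the biggest address once, its length is the width */
		return snprintf(bf, sizeof(bf), "%" PRIx64, start + last_offset);
	}

	/* every address is then printed as: printf("%*" PRIx64 ":", width, addr); */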
static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width)
2011-02-04 14:45:46 +03:00
{
2017-10-11 18:01:47 +03:00
	s64 offset = dl->al.offset;
	const u64 addr = start + offset;
	struct block_range *br;

	br = block_range__find(addr);
2017-10-11 18:01:58 +03:00
	color_fprintf(stdout, annotate__address_color(br), "  %*" PRIx64 ":", addr_fmt_width, addr);
2017-10-11 18:01:47 +03:00
	color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line);
	annotate__branch_printf(br, addr);
	return 0;
}

static int
annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start,
2019-07-21 14:23:51 +03:00
		       struct evsel *evsel, u64 len, int min_pcnt, int printed,
2018-08-04 16:05:13 +03:00
		       int max_lines, struct annotation_line *queue, int addr_fmt_width,
		       int percent_type)
2017-10-11 18:01:47 +03:00
{
	struct disasm_line *dl = container_of(al, struct disasm_line, al);
2011-02-04 14:45:46 +03:00
	static const char *prev_line;
2017-10-11 18:01:47 +03:00

	if (al->offset != -1) {
2017-10-11 18:01:42 +03:00
		double max_percent = 0.0;
perf annotate: Add basic support to event group view
Add a --group option to enable event grouping. When enabled, all the
group members' information is shown with the leader, so non-leader
events are skipped.
It only supports --stdio output currently; later patches will extend
it with additional features.
$ perf annotate --group --stdio
...
Percent | Source code & Disassembly of libpthread-2.15.so
--------------------------------------------------------------------------------
:
:
:
: Disassembly of section .text:
:
: 000000387dc0aa50 <__pthread_mutex_unlock_usercnt>:
8.08 2.40 5.29 : 387dc0aa50: mov %rdi,%rdx
0.00 0.00 0.00 : 387dc0aa53: mov 0x10(%rdi),%edi
0.00 0.00 0.00 : 387dc0aa56: mov %edi,%eax
0.00 0.80 0.00 : 387dc0aa58: and $0x7f,%eax
3.03 2.40 3.53 : 387dc0aa5b: test $0x7c,%dil
0.00 0.00 0.00 : 387dc0aa5f: jne 387dc0aaa9 <__pthread_mutex_unlock_use
0.00 0.00 0.00 : 387dc0aa61: test %eax,%eax
0.00 0.00 0.00 : 387dc0aa63: jne 387dc0aa85 <__pthread_mutex_unlock_use
0.00 0.00 0.00 : 387dc0aa65: and $0x80,%edi
0.00 0.00 0.00 : 387dc0aa6b: test %esi,%esi
3.03 5.60 7.06 : 387dc0aa6d: movl $0x0,0x8(%rdx)
0.00 0.00 0.59 : 387dc0aa74: je 387dc0aa7a <__pthread_mutex_unlock_use
0.00 0.00 0.00 : 387dc0aa76: subl $0x1,0xc(%rdx)
2.02 5.60 1.18 : 387dc0aa7a: mov %edi,%esi
0.00 0.00 0.00 : 387dc0aa7c: lock decl (%rdx)
83.84 83.20 82.35 : 387dc0aa7f: jne 387dc0aada <_L_unlock_586>
0.00 0.00 0.00 : 387dc0aa81: nop
0.00 0.00 0.00 : 387dc0aa82: xor %eax,%eax
0.00 0.00 0.00 : 387dc0aa84: retq
...
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1362462812-30885-6-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-03-05 09:53:25 +04:00
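For context, a group like the one in the output above is typically recorded
with perf's brace syntax and then viewed with --group (the event list and
workload here are just an example):

	$ perf record -e '{cycles,instructions,cache-misses}' -- ./workload
	$ perf annotate --group --stdio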
		int i, nr_percent = 1;
2011-02-04 14:45:46 +03:00
		const char *color;
		struct annotation *notes = symbol__annotation(sym);
2011-02-08 18:27:39 +03:00

2018-08-04 16:05:05 +03:00
		for (i = 0; i < al->data_nr; i++) {
2018-08-04 16:05:09 +03:00
			double percent;

			percent = annotation_data__percent(&al->data[i],
2018-08-04 16:05:13 +03:00
							   percent_type);
2011-02-04 14:45:46 +03:00

2018-08-04 16:05:09 +03:00
			if (percent > max_percent)
				max_percent = percent;
2013-03-05 09:53:25 +04:00
		}
2018-08-04 16:05:05 +03:00
		if (al->data_nr > nr_percent)
			nr_percent = al->data_nr;
2018-05-09 18:57:15 +03:00
2013-03-05 09:53:25 +04:00
		if (max_percent < min_pcnt)
2011-02-06 19:54:44 +03:00
			return -1;
2011-02-08 20:01:39 +03:00
		if (max_lines && printed >= max_lines)
2011-02-06 19:54:44 +03:00
			return 1;
2011-02-05 20:37:31 +03:00

2011-02-08 20:29:25 +03:00
		if (queue != NULL) {
2017-10-11 18:01:47 +03:00
			list_for_each_entry_from(queue, &notes->src->source, node) {
				if (queue == al)
2011-02-08 20:29:25 +03:00
					break;
2017-10-11 18:01:47 +03:00
				annotation_line__print(queue, sym, start, evsel, len,
2018-08-04 16:05:13 +03:00
						       0, 0, 1, NULL, addr_fmt_width,
						       percent_type);
2011-02-08 20:29:25 +03:00
			}
		}
2013-03-05 09:53:25 +04:00
		color = get_percent_color(max_percent);
2011-02-04 14:45:46 +03:00
2013-03-05 09:53:25 +04:00
		for (i = 0; i < nr_percent; i++) {
2018-08-04 16:05:06 +03:00
			struct annotation_data *data = &al->data[i];
2018-08-04 16:05:09 +03:00
			double percent;
2017-10-11 18:01:42 +03:00

2018-08-04 16:05:13 +03:00
			percent = annotation_data__percent(data, percent_type);
2018-08-04 16:05:09 +03:00
			color = get_percent_color(percent);
2015-06-19 22:10:43 +03:00

			if (symbol_conf.show_total_period)
2017-07-26 23:16:46 +03:00
				color_fprintf(stdout, color, " %11" PRIu64,
2018-08-04 16:05:06 +03:00
					      data->he.period);
2017-08-18 11:46:48 +03:00
			else if (symbol_conf.show_nr_samples)
				color_fprintf(stdout, color, " %7" PRIu64,
2018-08-04 16:05:06 +03:00
					      data->he.nr_samples);
2015-06-19 22:10:43 +03:00
			else
2018-08-04 16:05:09 +03:00
				color_fprintf(stdout, color, " %7.2f", percent);
2013-03-05 09:53:25 +04:00
		}
2017-10-11 18:01:58 +03:00
		printf(" : ");
2016-09-05 22:08:12 +03:00
2017-10-11 18:01:58 +03:00
		disasm_line__print(dl, start, addr_fmt_width);
perf annotate: Add line number like in TUI and source location at EOL
The patch changes the output format in 2 ways:
- line number is displayed for all source lines (matching TUI mode)
- source locations for the hottest lines are printed
at the line end in order to preserve layout
Before:
0.00 : 405ef1: inc %r15
: tmpsd * (TD + tmpsd * TDD)));
0.01 : 405ef4: vfmadd213sd 0x2b9b3(%rip),%xmm0,%xmm3 # 4318b0 <_IO_stdin_used+0x8b0>
: tmpsd * (TC +
eff.c:1811 0.67 : 405efd: vfmadd213sd 0x2b9b2(%rip),%xmm0,%xmm3 # 4318b8 <_IO_stdin_used+0x8b8>
: TA + tmpsd * (TB +
0.35 : 405f06: vfmadd213sd 0x2b9b1(%rip),%xmm0,%xmm3 # 4318c0 <_IO_stdin_used+0x8c0>
: dumbo =
eff.c:1809 1.41 : 405f0f: vfmadd213sd 0x2b9b0(%rip),%xmm0,%xmm3 # 4318c8 <_IO_stdin_used+0x8c8>
: sumi -= sj * tmpsd * dij2i * dumbo;
eff.c:1813 2.58 : 405f18: vmulsd %xmm3,%xmm0,%xmm0
2.81 : 405f1c: vfnmadd213sd 0x30(%rsp),%xmm1,%xmm0
3.78 : 405f23: vmovsd %xmm0,0x30(%rsp)
: for (k = 0; k < lpears[i] + upears[i]; k++) {
eff.c:1761 0.90 : 405f29: cmp %r15d,%r12d
After:
0.00 : 405ef1: inc %r15
: 1812 tmpsd * (TD + tmpsd * TDD)));
0.01 : 405ef4: vfmadd213sd 0x2b9b3(%rip),%xmm0,%xmm3 # 4318b0 <_IO_stdin_used+0x8b0>
: 1811 tmpsd * (TC +
0.67 : 405efd: vfmadd213sd 0x2b9b2(%rip),%xmm0,%xmm3 # 4318b8 <_IO_stdin_used+0x8b8> // eff.c:1811
: 1810 TA + tmpsd * (TB +
0.35 : 405f06: vfmadd213sd 0x2b9b1(%rip),%xmm0,%xmm3 # 4318c0 <_IO_stdin_used+0x8c0>
: 1809 dumbo =
1.41 : 405f0f: vfmadd213sd 0x2b9b0(%rip),%xmm0,%xmm3 # 4318c8 <_IO_stdin_used+0x8c8> // eff.c:1809
: 1813 sumi -= sj * tmpsd * dij2i * dumbo;
2.58 : 405f18: vmulsd %xmm3,%xmm0,%xmm0 // eff.c:1813
2.81 : 405f1c: vfnmadd213sd 0x30(%rsp),%xmm1,%xmm0
3.78 : 405f23: vmovsd %xmm0,0x30(%rsp)
: 1761 for (k = 0; k < lpears[i] + upears[i]; k++) {
Where e.g. '// eff.c:1811' shares the same color as the percentage
at the line beginning.
Signed-off-by: Martin Liška <mliska@suse.cz>
Link: http://lore.kernel.org/lkml/a0d53f31-f633-5013-c386-a4452391b081@suse.cz
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2021-02-21 15:46:36 +03:00
		/*
		 * Also color the filename and line if needed, with
		 * the same color as the percentage. Don't print it
		 * twice for adjacent colored addrs with the same filename:line.
		 */
		if (al->path) {
			if (!prev_line || strcmp(prev_line, al->path)) {
				color_fprintf(stdout, color, " // %s", al->path);
				prev_line = al->path;
			}
		}
perf annotate: Add branch stack / basic block
I wanted to know the hottest path through a function and figured the
branch-stack (LBR) information should be able to help out with that.
The below uses the branch-stack to create basic blocks and generate
statistics from them.
from to branch_i
* ----> *
|
| block
v
* ----> *
from to branch_i+1
The blocks are broken down into non-overlapping ranges, while tracking
if the start of each range is an entry point and/or the end of a range
is a branch.
Each block iterates all ranges it covers (while splitting where required
to exactly match the block) and increments the 'coverage' count.
For the range including the branch we increment the taken counter, as
well as the pred counter if flags.predicted.
Using these number we can find if an instruction:
- had coverage; given by:
br->coverage / br->sym->max_coverage
This metric ensures each symbol has a 100% spot, which reflects the
observation that each symbol must have a most covered/hottest
block.
- is a branch target: br->is_target && br->start == add
- for targets, how much of a branch's coverages comes from it:
target->entry / branch->coverage
- is a branch: br->is_branch && br->end == addr
- for branches, how often it was taken:
br->taken / br->coverage
after all, all execution that didn't take the branch would have
incremented the coverage and continued onward to a later branch.
- for branches, how often it was predicted:
br->pred / br->taken
The coverage percentage is used to color the address and asm sections;
for low (<1%) coverage we use NORMAL (uncolored), indicating that these
instructions are not 'important'. For high coverage (>75%) we color the
address RED.
For each branch, we add an asm comment after the instruction with
information on how often it was taken and predicted.
Output looks like (sans color, which does loose a lot of the
information :/)
$ perf record --branch-filter u,any -e cycles:p ./branches 27
$ perf annotate branches
Percent | Source code & Disassembly of branches for cycles:pu (217 samples)
---------------------------------------------------------------------------------
: branches():
0.00 : 40057a: push %rbp
0.00 : 40057b: mov %rsp,%rbp
0.00 : 40057e: sub $0x20,%rsp
0.00 : 400582: mov %rdi,-0x18(%rbp)
0.00 : 400586: mov %rsi,-0x20(%rbp)
0.00 : 40058a: mov -0x18(%rbp),%rax
0.00 : 40058e: mov %rax,-0x10(%rbp)
0.00 : 400592: movq $0x0,-0x8(%rbp)
0.00 : 40059a: jmpq 400656 <branches+0xdc>
1.84 : 40059f: mov -0x10(%rbp),%rax # +100.00%
3.23 : 4005a3: and $0x1,%eax
1.84 : 4005a6: test %rax,%rax
0.00 : 4005a9: je 4005bf <branches+0x45> # -54.50% (p:42.00%)
0.46 : 4005ab: mov 0x200bbe(%rip),%rax # 601170 <acc>
12.90 : 4005b2: add $0x1,%rax
2.30 : 4005b6: mov %rax,0x200bb3(%rip) # 601170 <acc>
0.46 : 4005bd: jmp 4005d1 <branches+0x57> # -100.00% (p:100.00%)
0.92 : 4005bf: mov 0x200baa(%rip),%rax # 601170 <acc> # +49.54%
13.82 : 4005c6: sub $0x1,%rax
0.46 : 4005ca: mov %rax,0x200b9f(%rip) # 601170 <acc>
2.30 : 4005d1: mov -0x10(%rbp),%rax # +50.46%
0.46 : 4005d5: mov %rax,%rdi
0.46 : 4005d8: callq 400526 <lfsr> # -100.00% (p:100.00%)
0.00 : 4005dd: mov %rax,-0x10(%rbp) # +100.00%
0.92 : 4005e1: mov -0x18(%rbp),%rax
0.00 : 4005e5: and $0x1,%eax
0.00 : 4005e8: test %rax,%rax
0.00 : 4005eb: je 4005ff <branches+0x85> # -100.00% (p:100.00%)
0.00 : 4005ed: mov 0x200b7c(%rip),%rax # 601170 <acc>
0.00 : 4005f4: shr $0x2,%rax
0.00 : 4005f8: mov %rax,0x200b71(%rip) # 601170 <acc>
0.00 : 4005ff: mov -0x10(%rbp),%rax # +100.00%
7.37 : 400603: and $0x1,%eax
3.69 : 400606: test %rax,%rax
0.00 : 400609: jne 400612 <branches+0x98> # -59.25% (p:42.99%)
1.84 : 40060b: mov $0x1,%eax
14.29 : 400610: jmp 400617 <branches+0x9d> # -100.00% (p:100.00%)
1.38 : 400612: mov $0x0,%eax # +57.65%
10.14 : 400617: test %al,%al # +42.35%
0.00 : 400619: je 40062f <branches+0xb5> # -57.65% (p:100.00%)
0.46 : 40061b: mov 0x200b4e(%rip),%rax # 601170 <acc>
2.76 : 400622: sub $0x1,%rax
0.00 : 400626: mov %rax,0x200b43(%rip) # 601170 <acc>
0.46 : 40062d: jmp 400641 <branches+0xc7> # -100.00% (p:100.00%)
0.92 : 40062f: mov 0x200b3a(%rip),%rax # 601170 <acc> # +56.13%
2.30 : 400636: add $0x1,%rax
0.92 : 40063a: mov %rax,0x200b2f(%rip) # 601170 <acc>
0.92 : 400641: mov -0x10(%rbp),%rax # +43.87%
2.30 : 400645: mov %rax,%rdi
0.00 : 400648: callq 400526 <lfsr> # -100.00% (p:100.00%)
0.00 : 40064d: mov %rax,-0x10(%rbp) # +100.00%
1.84 : 400651: addq $0x1,-0x8(%rbp)
0.92 : 400656: mov -0x8(%rbp),%rax
5.07 : 40065a: cmp -0x20(%rbp),%rax
0.00 : 40065e: jb 40059f <branches+0x25> # -100.00% (p:100.00%)
0.00 : 400664: nop
0.00 : 400665: leaveq
0.00 : 400666: retq
(Note: the --branch-filter u,any was used to avoid spurious target and
branch points due to interrupts/faults; they show up as very small -/+
annotations in 'weird' locations.)
Committer note:
Please take a look at:
http://vger.kernel.org/~acme/perf/annotate_basic_blocks.png
to see the colors.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: David Carrillo-Cisneros <davidcc@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
[ Moved sym->max_coverage to 'struct annotate', aka symbol__annotate(sym) ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-09-05 22:08:12 +03:00
printf ( " \n " ) ;
2011-02-08 20:01:39 +03:00
} else if ( max_lines & & printed > = max_lines )
2011-02-06 19:54:44 +03:00
return 1 ;
else {
2017-07-26 23:16:46 +03:00
int width = symbol_conf . show_total_period ? 12 : 8 ;
perf annotate: Add basic support to event group view
Add --group option to enable event grouping. When enabled, all the
group members' information will be shown with the leader, so non-leader
events are skipped.
It only supports --stdio output currently. Later patches will add more
features.
$ perf annotate --group --stdio
...
Percent | Source code & Disassembly of libpthread-2.15.so
--------------------------------------------------------------------------------
:
:
:
: Disassembly of section .text:
:
: 000000387dc0aa50 <__pthread_mutex_unlock_usercnt>:
8.08 2.40 5.29 : 387dc0aa50: mov %rdi,%rdx
0.00 0.00 0.00 : 387dc0aa53: mov 0x10(%rdi),%edi
0.00 0.00 0.00 : 387dc0aa56: mov %edi,%eax
0.00 0.80 0.00 : 387dc0aa58: and $0x7f,%eax
3.03 2.40 3.53 : 387dc0aa5b: test $0x7c,%dil
0.00 0.00 0.00 : 387dc0aa5f: jne 387dc0aaa9 <__pthread_mutex_unlock_use
0.00 0.00 0.00 : 387dc0aa61: test %eax,%eax
0.00 0.00 0.00 : 387dc0aa63: jne 387dc0aa85 <__pthread_mutex_unlock_use
0.00 0.00 0.00 : 387dc0aa65: and $0x80,%edi
0.00 0.00 0.00 : 387dc0aa6b: test %esi,%esi
3.03 5.60 7.06 : 387dc0aa6d: movl $0x0,0x8(%rdx)
0.00 0.00 0.59 : 387dc0aa74: je 387dc0aa7a <__pthread_mutex_unlock_use
0.00 0.00 0.00 : 387dc0aa76: subl $0x1,0xc(%rdx)
2.02 5.60 1.18 : 387dc0aa7a: mov %edi,%esi
0.00 0.00 0.00 : 387dc0aa7c: lock decl (%rdx)
83.84 83.20 82.35 : 387dc0aa7f: jne 387dc0aada <_L_unlock_586>
0.00 0.00 0.00 : 387dc0aa81: nop
0.00 0.00 0.00 : 387dc0aa82: xor %eax,%eax
0.00 0.00 0.00 : 387dc0aa84: retq
...
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1362462812-30885-6-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-03-05 09:53:25 +04:00
2011-02-08 20:29:25 +03:00
if ( queue )
return - 1 ;
2020-04-30 16:51:16 +03:00
if ( evsel__is_group_event ( evsel ) )
2019-07-21 14:24:46 +03:00
width * = evsel - > core . nr_members ;
2013-03-05 09:53:25 +04:00
2017-10-11 18:01:47 +03:00
if ( ! * al - > line )
2013-03-05 09:53:25 +04:00
printf ( " %*s: \n " , width , " " ) ;
2011-02-04 14:45:46 +03:00
else
perf annotate: Add line number like in TUI and source location at EOL
The patch changes the output format in 2 ways:
- line number is displayed for all source lines (matching TUI mode)
- source locations for the hottest lines are printed
at the line end in order to preserve layout
Before:
0.00 : 405ef1: inc %r15
: tmpsd * (TD + tmpsd * TDD)));
0.01 : 405ef4: vfmadd213sd 0x2b9b3(%rip),%xmm0,%xmm3 # 4318b0 <_IO_stdin_used+0x8b0>
: tmpsd * (TC +
eff.c:1811 0.67 : 405efd: vfmadd213sd 0x2b9b2(%rip),%xmm0,%xmm3 # 4318b8 <_IO_stdin_used+0x8b8>
: TA + tmpsd * (TB +
0.35 : 405f06: vfmadd213sd 0x2b9b1(%rip),%xmm0,%xmm3 # 4318c0 <_IO_stdin_used+0x8c0>
: dumbo =
eff.c:1809 1.41 : 405f0f: vfmadd213sd 0x2b9b0(%rip),%xmm0,%xmm3 # 4318c8 <_IO_stdin_used+0x8c8>
: sumi -= sj * tmpsd * dij2i * dumbo;
eff.c:1813 2.58 : 405f18: vmulsd %xmm3,%xmm0,%xmm0
2.81 : 405f1c: vfnmadd213sd 0x30(%rsp),%xmm1,%xmm0
3.78 : 405f23: vmovsd %xmm0,0x30(%rsp)
: for (k = 0; k < lpears[i] + upears[i]; k++) {
eff.c:1761 0.90 : 405f29: cmp %r15d,%r12d
After:
0.00 : 405ef1: inc %r15
: 1812 tmpsd * (TD + tmpsd * TDD)));
0.01 : 405ef4: vfmadd213sd 0x2b9b3(%rip),%xmm0,%xmm3 # 4318b0 <_IO_stdin_used+0x8b0>
: 1811 tmpsd * (TC +
0.67 : 405efd: vfmadd213sd 0x2b9b2(%rip),%xmm0,%xmm3 # 4318b8 <_IO_stdin_used+0x8b8> // eff.c:1811
: 1810 TA + tmpsd * (TB +
0.35 : 405f06: vfmadd213sd 0x2b9b1(%rip),%xmm0,%xmm3 # 4318c0 <_IO_stdin_used+0x8c0>
: 1809 dumbo =
1.41 : 405f0f: vfmadd213sd 0x2b9b0(%rip),%xmm0,%xmm3 # 4318c8 <_IO_stdin_used+0x8c8> // eff.c:1809
: 1813 sumi -= sj * tmpsd * dij2i * dumbo;
2.58 : 405f18: vmulsd %xmm3,%xmm0,%xmm0 // eff.c:1813
2.81 : 405f1c: vfnmadd213sd 0x30(%rsp),%xmm1,%xmm0
3.78 : 405f23: vmovsd %xmm0,0x30(%rsp)
: 1761 for (k = 0; k < lpears[i] + upears[i]; k++) {
Where e.g. '// eff.c:1811' shares the same color as the percentage
at the line beginning.
Signed-off-by: Martin Liška <mliska@suse.cz>
Link: http://lore.kernel.org/lkml/a0d53f31-f633-5013-c386-a4452391b081@suse.cz
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2021-02-21 15:46:36 +03:00
printf ( " %*s: %-*d %s \n " , width , " " , addr_fmt_width , al - > line_nr , al - > line ) ;
2011-02-04 14:45:46 +03:00
}
2011-02-06 19:54:44 +03:00
return 0 ;
2011-02-04 14:45:46 +03:00
}
2013-03-05 09:53:22 +04:00
/*
* symbol__parse_objdump_line ( ) parses objdump output ( with - d - - no - show - raw )
* which looks like the following
*
* 0000000000415500 < _init > :
* 415500 : sub $ 0x8 , % rsp
* 415504 : mov 0x2f5ad5 ( % rip ) , % rax # 70 afe0 < _DYNAMIC + 0x2f8 >
* 41550 b : test % rax , % rax
* 41550 e : je 415515 < _init + 0x15 >
* 415510 : callq 416e70 < __gmon_start__ @ plt >
* 415515 : add $ 0x8 , % rsp
* 415519 : retq
*
* it will be parsed and saved into struct disasm_line as
* < offset > < name > < ops . raw >
*
* The offset will be a relative offset from the start of the symbol and - 1
* means that it ' s not a disassembly line so should be treated differently .
* The ops . raw part will be parsed further according to type of the instruction .
*/
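As a worked, standalone illustration of that < offset > < name > < ops . raw >
split (hypothetical helper code, not perf's own parser):
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *line = "  415504:	mov    0x2f5ad5(%rip),%rax";
	unsigned long long sym_start = 0x415500, line_ip;
	char *tmp;
	char name[32], ops[128];

	/* a hex address up to the ':' marks a real disassembly line */
	line_ip = strtoull(line, &tmp, 16);
	if (line != tmp && tmp[0] == ':') {
		long long offset = line_ip - sym_start;	/* symbol-relative */

		/* instruction name first, the rest as raw operands */
		if (sscanf(tmp + 1, " %31s %127[^\n]", name, ops) == 2)
			printf("<%lld> <%s> <%s>\n", offset, name, ops);
		/* prints: <4> <mov> <0x2f5ad5(%rip),%rax> */
	}
	return 0;
}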
2019-10-10 21:36:45 +03:00
static int symbol__parse_objdump_line ( struct symbol * sym ,
2017-10-11 18:01:29 +03:00
struct annotate_args * args ,
2021-02-15 14:34:46 +03:00
char * parsed_line , int * line_nr , char * * fileloc )
2011-02-04 14:45:46 +03:00
{
2018-03-20 22:19:08 +03:00
struct map * map = args - > ms . map ;
2011-02-08 18:27:39 +03:00
struct annotation * notes = symbol__annotation ( sym ) ;
2012-04-15 22:24:39 +04:00
struct disasm_line * dl ;
2019-10-10 21:36:48 +03:00
char * tmp ;
2011-02-04 14:45:46 +03:00
s64 line_ip , offset = - 1 ;
2014-11-13 05:05:26 +03:00
regmatch_t match [ 2 ] ;
2011-02-04 14:45:46 +03:00
2014-11-13 05:05:26 +03:00
/* /filename:linenr ? Save line number and ignore. */
2017-04-08 03:52:25 +03:00
if ( regexec ( & file_lineno , parsed_line , 2 , match , 0 ) = = 0 ) {
* line_nr = atoi ( parsed_line + match [ 1 ] . rm_so ) ;
2021-02-15 14:34:46 +03:00
* fileloc = strdup ( parsed_line ) ;
2014-11-13 05:05:26 +03:00
return 0 ;
}
2019-10-10 21:36:48 +03:00
/* Process hex address followed by ':'. */
line_ip = strtoull ( parsed_line , & tmp , 16 ) ;
if ( parsed_line ! = tmp & & tmp [ 0 ] = = ' : ' & & tmp [ 1 ] ! = ' \0 ' ) {
2011-02-04 14:45:46 +03:00
u64 start = map__rip_2objdump ( map , sym - > start ) ,
end = map__rip_2objdump ( map , sym - > end ) ;
offset = line_ip - start ;
2014-10-15 00:19:44 +04:00
if ( ( u64 ) line_ip < start | | ( u64 ) line_ip > = end )
2011-02-04 14:45:46 +03:00
offset = - 1 ;
2012-04-02 19:59:01 +04:00
else
2019-10-10 21:36:48 +03:00
parsed_line = tmp + 1 ;
2012-04-12 00:04:59 +04:00
}
2011-02-04 14:45:46 +03:00
2017-10-11 18:01:32 +03:00
args - > offset = offset ;
args - > line = parsed_line ;
args - > line_nr = * line_nr ;
2021-02-15 14:34:46 +03:00
args - > fileloc = * fileloc ;
2018-03-20 22:19:08 +03:00
args - > ms . sym = sym ;
2017-10-11 18:01:32 +03:00
dl = disasm_line__new ( args ) ;
2014-11-13 05:05:26 +03:00
( * line_nr ) + + ;
2012-04-02 19:59:01 +04:00
2012-04-15 22:24:39 +04:00
if ( dl = = NULL )
2011-02-04 14:45:46 +03:00
return - 1 ;
2012-04-02 19:59:01 +04:00
perf annotate: Add "_local" to jump/offset validation routines
Because they all really check if we can access data structures/visual
constructs where a "jump" instruction targets code in the same function,
i.e. things like:
__pthread_mutex_lock /usr/lib64/libpthread-2.26.so
1.95 │ mov __pthread_force_elision,%ecx
│ ┌──test %ecx,%ecx
0.07 │ ├──je 60
│ │ test $0x300,%esi
│ │↓ jne 60
│ │ or $0x100,%esi
│ │ mov %esi,0x10(%rdi)
│ 42:│ mov %esi,%edx
│ │ lea 0x16(%r8),%rsi
│ │ mov %r8,%rdi
│ │ and $0x80,%edx
│ │ add $0x8,%rsp
│ │→ jmpq __lll_lock_elision
│ │ nop
0.29 │ 60:└─→and $0x80,%esi
0.07 │ mov $0x1,%edi
0.29 │ xor %eax,%eax
2.53 │ lock cmpxchg %edi,(%r8)
And not things like that "jmpq __lll_lock_elision", which instead should behave
like a "call" instruction and "jump" to the disassembly of "__lll_lock_elision".
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: https://lkml.kernel.org/n/tip-3cwx39u3h66dfw9xjrlt7ca2@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-03-23 16:12:33 +03:00
if ( ! disasm_line__has_local_offset ( dl ) ) {
2013-08-07 15:38:54 +04:00
dl - > ops . target . offset = dl - > ops . target . addr -
map__rip_2objdump ( map , sym - > start ) ;
perf annotate: Fix jump target outside of function address range
If the jump target is outside of the function range, perf does not handle
it correctly. In particular, when the target address is less than the
function start address, the target offset will be negative. But since the
target address is declared unsigned, the negative number is converted into
its 2's complement representation. See the example below: the target of the
'jmpq' instruction at 34cf8 is 34ac0, which is less than the function start
address (34cf0).
34ac0 - 34cf0 = -0x230 = 0xfffffffffffffdd0
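A tiny standalone demo of that wraparound (plain C; illustration only,
not perf code):
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0x34cf0, target = 0x34ac0;

	/* unsigned arithmetic wraps: this is the bogus jump target */
	printf("0x%" PRIx64 "\n", target - start);	/* 0xfffffffffffffdd0 */
	/* interpreted as signed, the offset is the expected -0x230 */
	printf("%" PRId64 "\n", (int64_t)(target - start));	/* -560 */
	return 0;
}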
Objdump output:
0000000000034cf0 <__sigaction>:
__GI___sigaction():
34cf0: lea -0x20(%rdi),%eax
34cf3: cmp $0x1,%eax
34cf6: jbe 34d00 <__sigaction+0x10>
34cf8: jmpq 34ac0 <__GI___libc_sigaction>
34cfd: nopl (%rax)
34d00: mov 0x386161(%rip),%rax # 3bae68 <_DYNAMIC+0x2e8>
34d07: movl $0x16,%fs:(%rax)
34d0e: mov $0xffffffff,%eax
34d13: retq
perf annotate before applying patch:
__GI___sigaction /usr/lib64/libc-2.22.so
lea -0x20(%rdi),%eax
cmp $0x1,%eax
v jbe 10
v jmpq fffffffffffffdd0
nop
10: mov _DYNAMIC+0x2e8,%rax
movl $0x16,%fs:(%rax)
mov $0xffffffff,%eax
retq
perf annotate after applying patch:
__GI___sigaction /usr/lib64/libc-2.22.so
lea -0x20(%rdi),%eax
cmp $0x1,%eax
v jbe 10
^ jmpq 34ac0 <__GI___libc_sigaction>
nop
10: mov _DYNAMIC+0x2e8,%rax
movl $0x16,%fs:(%rax)
mov $0xffffffff,%eax
retq
Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Chris Riyder <chris.ryder@arm.com>
Cc: Kim Phillips <kim.phillips@arm.com>
Cc: Markus Trippelsdorf <markus@trippelsdorf.de>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Taeung Song <treeze.taeung@gmail.com>
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/1480953407-7605-3-git-send-email-ravi.bangoria@linux.vnet.ibm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-12-05 18:56:47 +03:00
dl - > ops . target . offset_avail = true ;
}
2013-08-07 15:38:54 +04:00
2018-03-02 17:59:36 +03:00
/* kcore has no symbols, so add the call target symbol */
if ( dl - > ins . ops & & ins__is_call ( & dl - > ins ) & & ! dl - > ops . target . sym ) {
2013-10-14 14:43:40 +04:00
struct addr_map_symbol target = {
. addr = dl - > ops . target . addr ,
2019-11-04 21:57:38 +03:00
. ms = { . map = map , } ,
2013-10-14 14:43:40 +04:00
} ;
2019-11-26 04:15:35 +03:00
if ( ! maps__find_ams ( args - > ms . maps , & target ) & &
2019-11-04 21:57:38 +03:00
target . ms . sym - > start = = target . al_addr )
dl - > ops . target . sym = target . ms . sym ;
2013-08-07 15:38:57 +04:00
}
2017-10-11 18:01:35 +03:00
annotation_line__add ( & dl - > al , & notes - > src - > source ) ;
2011-02-04 14:45:46 +03:00
return 0 ;
}
2014-11-13 05:05:26 +03:00
static __attribute__ ( ( constructor ) ) void symbol__init_regexpr ( void )
{
regcomp ( & file_lineno , " ^/[^:]+:([0-9]+) " , REG_EXTENDED ) ;
}
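A self-contained demonstration of what that regular expression accepts
(the sample path is made up):
#include <regex.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	regex_t re;
	regmatch_t match[2];
	const char *line = "/home/user/eff.c:1811";

	/* same pattern as file_lineno above */
	regcomp(&re, "^/[^:]+:([0-9]+)", REG_EXTENDED);
	if (regexec(&re, line, 2, match, 0) == 0)
		printf("line_nr = %d\n", atoi(line + match[1].rm_so));
	regfree(&re);
	return 0;	/* prints: line_nr = 1811 */
}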
2013-08-07 15:38:56 +04:00
static void delete_last_nop ( struct symbol * sym )
{
struct annotation * notes = symbol__annotation ( sym ) ;
struct list_head * list = & notes - > src - > source ;
struct disasm_line * dl ;
while ( ! list_empty ( list ) ) {
2017-10-11 18:01:25 +03:00
dl = list_entry ( list - > prev , struct disasm_line , al . node ) ;
2013-08-07 15:38:56 +04:00
2016-11-24 17:16:06 +03:00
if ( dl - > ins . ops ) {
if ( dl - > ins . ops ! = & nop_ops )
2013-08-07 15:38:56 +04:00
return ;
} else {
2017-10-11 18:01:26 +03:00
if ( ! strstr ( dl - > al . line , " nop " ) & &
! strstr ( dl - > al . line , " nopl " ) & &
! strstr ( dl - > al . line , " nopw " ) )
2013-08-07 15:38:56 +04:00
return ;
}
2019-07-04 18:13:46 +03:00
list_del_init ( & dl - > al . node ) ;
2013-08-07 15:38:56 +04:00
disasm_line__free ( dl ) ;
}
}
2019-11-04 17:10:00 +03:00
int symbol__strerror_disassemble ( struct map_symbol * ms , int errnum , char * buf , size_t buflen )
2016-07-29 22:27:18 +03:00
{
2019-11-04 17:10:00 +03:00
struct dso * dso = ms - > map - > dso ;
2016-07-29 22:27:18 +03:00
BUG_ON ( buflen = = 0 ) ;
if ( errnum > = 0 ) {
str_error_r ( errnum , buf , buflen ) ;
return 0 ;
}
switch ( errnum ) {
case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX : {
char bf [ SBUILD_ID_SIZE + 15 ] = " with build id " ;
char * build_id_msg = NULL ;
if ( dso - > has_build_id ) {
2020-10-13 22:24:36 +03:00
build_id__sprintf ( & dso - > bid , bf + 15 ) ;
2016-07-29 22:27:18 +03:00
build_id_msg = bf ;
}
scnprintf ( buf , buflen ,
" No vmlinux file%s \n was found in the path. \n \n "
" Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability. \n \n "
" Please use: \n \n "
" perf buildid-cache -vu vmlinux \n \n "
" or: \n \n "
" --vmlinux vmlinux \n " , build_id_msg ? : " " ) ;
}
break ;
perf annotate: Enable annotation of BPF programs
In symbol__disassemble(), DSO_BINARY_TYPE__BPF_PROG_INFO dso calls into
a new function symbol__disassemble_bpf(), where annotation line
information is filled based on the bpf_prog_info and btf data saved in
given perf_env.
symbol__disassemble_bpf() uses binutils's libopcodes to disassemble bpf
programs.
Committer testing:
After fixing this:
- u64 *addrs = (u64 *)(info_linear->info.jited_ksyms);
+ u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
Detected when crossbuilding to a 32-bit arch.
And making all this dependent on HAVE_LIBBFD_SUPPORT and
HAVE_LIBBPF_SUPPORT:
1) Have a BPF program running, one that has BTF info, etc, I used
the tools/perf/examples/bpf/augmented_raw_syscalls.c put in place
by 'perf trace'.
# grep -B1 augmented_raw ~/.perfconfig
[trace]
add_events = /home/acme/git/perf/tools/perf/examples/bpf/augmented_raw_syscalls.c
#
# perf trace -e *mmsg
dnf/6245 sendmmsg(20, 0x7f5485a88030, 2, MSG_NOSIGNAL) = 2
NetworkManager/10055 sendmmsg(22<socket:[1056822]>, 0x7f8126ad1bb0, 2, MSG_NOSIGNAL) = 2
2) Then do a 'perf record' system wide for a while:
# perf record -a
^C[ perf record: Woken up 68 times to write data ]
[ perf record: Captured and wrote 19.427 MB perf.data (366891 samples) ]
#
3) Check that we captured BPF and BTF info in the perf.data file:
# perf report --header-only | grep 'b[pt]f'
# event : name = cycles:ppp, , id = { 294789, 294790, 294791, 294792, 294793, 294794, 294795, 294796 }, size = 112, { sample_period, sample_freq } = 4000, sample_type = IP|TID|TIME|CPU|PERIOD, read_format = ID, disabled = 1, inherit = 1, mmap = 1, comm = 1, freq = 1, task = 1, precise_ip = 3, sample_id_all = 1, exclude_guest = 1, mmap2 = 1, comm_exec = 1, ksymbol = 1, bpf_event = 1
# bpf_prog_info of id 13
# bpf_prog_info of id 14
# bpf_prog_info of id 15
# bpf_prog_info of id 16
# bpf_prog_info of id 17
# bpf_prog_info of id 18
# bpf_prog_info of id 21
# bpf_prog_info of id 22
# bpf_prog_info of id 41
# bpf_prog_info of id 42
# btf info of id 2
#
4) Check which programs got recorded:
# perf report | grep bpf_prog | head
0.16% exe bpf_prog_819967866022f1e1_sys_enter [k] bpf_prog_819967866022f1e1_sys_enter
0.14% exe bpf_prog_c1bd85c092d6e4aa_sys_exit [k] bpf_prog_c1bd85c092d6e4aa_sys_exit
0.08% fuse-overlayfs bpf_prog_819967866022f1e1_sys_enter [k] bpf_prog_819967866022f1e1_sys_enter
0.07% fuse-overlayfs bpf_prog_c1bd85c092d6e4aa_sys_exit [k] bpf_prog_c1bd85c092d6e4aa_sys_exit
0.01% clang-4.0 bpf_prog_c1bd85c092d6e4aa_sys_exit [k] bpf_prog_c1bd85c092d6e4aa_sys_exit
0.01% clang-4.0 bpf_prog_819967866022f1e1_sys_enter [k] bpf_prog_819967866022f1e1_sys_enter
0.00% clang bpf_prog_c1bd85c092d6e4aa_sys_exit [k] bpf_prog_c1bd85c092d6e4aa_sys_exit
0.00% runc bpf_prog_819967866022f1e1_sys_enter [k] bpf_prog_819967866022f1e1_sys_enter
0.00% clang bpf_prog_819967866022f1e1_sys_enter [k] bpf_prog_819967866022f1e1_sys_enter
0.00% sh bpf_prog_c1bd85c092d6e4aa_sys_exit [k] bpf_prog_c1bd85c092d6e4aa_sys_exit
#
This was with the default --sort order for 'perf report', which is:
--sort comm,dso,symbol
If we just look for the symbol, for instance:
# perf report --sort symbol | grep bpf_prog | head
0.26% [k] bpf_prog_819967866022f1e1_sys_enter - -
0.24% [k] bpf_prog_c1bd85c092d6e4aa_sys_exit - -
#
or the DSO:
# perf report --sort dso | grep bpf_prog | head
0.26% bpf_prog_819967866022f1e1_sys_enter
0.24% bpf_prog_c1bd85c092d6e4aa_sys_exit
#
We'll see the two BPF programs that augmented_raw_syscalls.o puts in
place, one attached to the raw_syscalls:sys_enter and another to the
raw_syscalls:sys_exit tracepoints, as expected.
Now we can finally do, from the command line, annotation for one of
those two symbols, with the original BPF program source code intermixed
with the disassembled JITed code:
# perf annotate --stdio2 bpf_prog_819967866022f1e1_sys_enter
Samples: 950 of event 'cycles:ppp', 4000 Hz, Event count (approx.): 553756947, [percent: local period]
bpf_prog_819967866022f1e1_sys_enter() bpf_prog_819967866022f1e1_sys_enter
Percent int sys_enter(struct syscall_enter_args *args)
53.41 push %rbp
0.63 mov %rsp,%rbp
0.31 sub $0x170,%rsp
1.93 sub $0x28,%rbp
7.02 mov %rbx,0x0(%rbp)
3.20 mov %r13,0x8(%rbp)
1.07 mov %r14,0x10(%rbp)
0.61 mov %r15,0x18(%rbp)
0.11 xor %eax,%eax
1.29 mov %rax,0x20(%rbp)
0.11 mov %rdi,%rbx
return bpf_get_current_pid_tgid();
2.02 → callq *ffffffffda6776d9
2.76 mov %eax,-0x148(%rbp)
mov %rbp,%rsi
int sys_enter(struct syscall_enter_args *args)
add $0xfffffffffffffeb8,%rsi
return bpf_map_lookup_elem(pids, &pid) != NULL;
movabs $0xffff975ac2607800,%rdi
1.26 → callq *ffffffffda6789e9
cmp $0x0,%rax
2.43 → je 0
add $0x38,%rax
0.21 xor %r13d,%r13d
if (pid_filter__has(&pids_filtered, getpid()))
0.81 cmp $0x0,%rax
→ jne 0
mov %rbp,%rdi
probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
2.22 add $0xfffffffffffffeb8,%rdi
0.11 mov $0x40,%esi
0.32 mov %rbx,%rdx
2.74 → callq *ffffffffda658409
syscall = bpf_map_lookup_elem(&syscalls, &augmented_args.args.syscall_nr);
0.22 mov %rbp,%rsi
1.69 add $0xfffffffffffffec0,%rsi
syscall = bpf_map_lookup_elem(&syscalls, &augmented_args.args.syscall_nr);
movabs $0xffff975bfcd36000,%rdi
add $0xd0,%rdi
0.21 mov 0x0(%rsi),%eax
0.93 cmp $0x200,%rax
→ jae 0
0.10 shl $0x3,%rax
0.11 add %rdi,%rax
0.11 → jmp 0
xor %eax,%eax
if (syscall == NULL || !syscall->enabled)
1.07 cmp $0x0,%rax
→ je 0
if (syscall == NULL || !syscall->enabled)
6.57 movzbq 0x0(%rax),%rdi
if (syscall == NULL || !syscall->enabled)
cmp $0x0,%rdi
0.95 → je 0
mov $0x40,%r8d
switch (augmented_args.args.syscall_nr) {
mov -0x140(%rbp),%rdi
switch (augmented_args.args.syscall_nr) {
cmp $0x2,%rdi
→ je 0
cmp $0x101,%rdi
→ je 0
cmp $0x15,%rdi
→ jne 0
case SYS_OPEN: filename_arg = (const void *)args->args[0];
mov 0x10(%rbx),%rdx
→ jmp 0
case SYS_OPENAT: filename_arg = (const void *)args->args[1];
mov 0x18(%rbx),%rdx
if (filename_arg != NULL) {
cmp $0x0,%rdx
→ je 0
xor %edi,%edi
augmented_args.filename.reserved = 0;
mov %edi,-0x104(%rbp)
augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
mov %rbp,%rdi
add $0xffffffffffffff00,%rdi
augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
mov $0x100,%esi
→ callq *ffffffffda658499
mov $0x148,%r8d
augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
mov %eax,-0x108(%rbp)
augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
mov %rax,%rdi
shl $0x20,%rdi
shr $0x20,%rdi
if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) {
cmp $0xff,%rdi
→ ja 0
len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size;
add $0x48,%rax
len &= sizeof(augmented_args.filename.value) - 1;
and $0xff,%rax
mov %rax,%r8
mov %rbp,%rcx
return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
add $0xfffffffffffffeb8,%rcx
mov %rbx,%rdi
movabs $0xffff975fbd72d800,%rsi
mov $0xffffffff,%edx
→ callq *ffffffffda658ad9
mov %rax,%r13
}
mov %r13,%rax
0.72 mov 0x0(%rbp),%rbx
mov 0x8(%rbp),%r13
1.16 mov 0x10(%rbp),%r14
0.10 mov 0x18(%rbp),%r15
0.42 add $0x28,%rbp
0.54 leaveq
0.54 ← retq
#
Please see 'man perf-config' for how to control what should be seen via
the ~/.perfconfig [annotate] section; for instance, one can suppress the
source code and see just the disassembly, etc.
Alternatively, use the TUI by just running 'perf annotate', press
'/bpf_prog' to see the bpf symbols, press enter and do the interactive
annotation, which allows for dumping to a file after selecting the
various output tunables; for instance, the above without source code
intermixed, plus showing all the instruction offsets:
# perf annotate bpf_prog_819967866022f1e1_sys_enter
Then press: 's' to hide the source code + 'O' twice to show all
instruction offsets, then 'P' to print to the
bpf_prog_819967866022f1e1_sys_enter.annotation file, which will have:
# cat bpf_prog_819967866022f1e1_sys_enter.annotation
bpf_prog_819967866022f1e1_sys_enter() bpf_prog_819967866022f1e1_sys_enter
Event: cycles:ppp
53.41 0: push %rbp
0.63 1: mov %rsp,%rbp
0.31 4: sub $0x170,%rsp
1.93 b: sub $0x28,%rbp
7.02 f: mov %rbx,0x0(%rbp)
3.20 13: mov %r13,0x8(%rbp)
1.07 17: mov %r14,0x10(%rbp)
0.61 1b: mov %r15,0x18(%rbp)
0.11 1f: xor %eax,%eax
1.29 21: mov %rax,0x20(%rbp)
0.11 25: mov %rdi,%rbx
2.02 28: → callq *ffffffffda6776d9
2.76 2d: mov %eax,-0x148(%rbp)
33: mov %rbp,%rsi
36: add $0xfffffffffffffeb8,%rsi
3d: movabs $0xffff975ac2607800,%rdi
1.26 47: → callq *ffffffffda6789e9
4c: cmp $0x0,%rax
2.43 50: → je 0
52: add $0x38,%rax
0.21 56: xor %r13d,%r13d
0.81 59: cmp $0x0,%rax
5d: → jne 0
63: mov %rbp,%rdi
2.22 66: add $0xfffffffffffffeb8,%rdi
0.11 6d: mov $0x40,%esi
0.32 72: mov %rbx,%rdx
2.74 75: → callq *ffffffffda658409
0.22 7a: mov %rbp,%rsi
1.69 7d: add $0xfffffffffffffec0,%rsi
84: movabs $0xffff975bfcd36000,%rdi
8e: add $0xd0,%rdi
0.21 95: mov 0x0(%rsi),%eax
0.93 98: cmp $0x200,%rax
9f: → jae 0
0.10 a1: shl $0x3,%rax
0.11 a5: add %rdi,%rax
0.11 a8: → jmp 0
aa: xor %eax,%eax
1.07 ac: cmp $0x0,%rax
b0: → je 0
6.57 b6: movzbq 0x0(%rax),%rdi
bb: cmp $0x0,%rdi
0.95 bf: → je 0
c5: mov $0x40,%r8d
cb: mov -0x140(%rbp),%rdi
d2: cmp $0x2,%rdi
d6: → je 0
d8: cmp $0x101,%rdi
df: → je 0
e1: cmp $0x15,%rdi
e5: → jne 0
e7: mov 0x10(%rbx),%rdx
eb: → jmp 0
ed: mov 0x18(%rbx),%rdx
f1: cmp $0x0,%rdx
f5: → je 0
f7: xor %edi,%edi
f9: mov %edi,-0x104(%rbp)
ff: mov %rbp,%rdi
102: add $0xffffffffffffff00,%rdi
109: mov $0x100,%esi
10e: → callq *ffffffffda658499
113: mov $0x148,%r8d
119: mov %eax,-0x108(%rbp)
11f: mov %rax,%rdi
122: shl $0x20,%rdi
126: shr $0x20,%rdi
12a: cmp $0xff,%rdi
131: → ja 0
133: add $0x48,%rax
137: and $0xff,%rax
13d: mov %rax,%r8
140: mov %rbp,%rcx
143: add $0xfffffffffffffeb8,%rcx
14a: mov %rbx,%rdi
14d: movabs $0xffff975fbd72d800,%rsi
157: mov $0xffffffff,%edx
15c: → callq *ffffffffda658ad9
161: mov %rax,%r13
164: mov %r13,%rax
0.72 167: mov 0x0(%rbp),%rbx
16b: mov 0x8(%rbp),%r13
1.16 16f: mov 0x10(%rbp),%r14
0.10 173: mov 0x18(%rbp),%r15
0.42 177: add $0x28,%rbp
0.54 17b: leaveq
0.54 17c: ← retq
Another cool way to test all this is to simply use 'perf top', look for
those symbols, go there, press enter, and annotate it live :-)
Signed-off-by: Song Liu <songliubraving@fb.com>
Reviewed-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stanislav Fomichev <sdf@google.com>
Link: http://lkml.kernel.org/r/20190312053051.2690567-13-songliubraving@fb.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-03-12 08:30:48 +03:00
case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF :
scnprintf ( buf , buflen , " Please link with binutils's libopcode to enable BPF annotation " ) ;
break ;
2019-09-30 21:48:12 +03:00
case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP :
scnprintf ( buf , buflen , " Problems with arch specific instruction name regular expressions. " ) ;
break ;
case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING :
scnprintf ( buf , buflen , " Problems while parsing the CPUID in the arch specific initialization. " ) ;
break ;
2019-09-30 22:04:21 +03:00
case SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE :
scnprintf ( buf , buflen , " Invalid BPF file: %s. " , dso - > long_name ) ;
break ;
case SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF :
scnprintf ( buf , buflen , " The %s BPF file has no BTF section, compile with -g or use pahole -J. " ,
dso - > long_name ) ;
break ;
2016-07-29 22:27:18 +03:00
default :
scnprintf ( buf , buflen , " Internal error: Invalid %d error code \n " , errnum ) ;
break ;
}
return 0 ;
}
2016-08-09 21:32:53 +03:00
static int dso__disassemble_filename ( struct dso * dso , char * filename , size_t filename_size )
2011-02-04 14:45:46 +03:00
{
2016-08-09 21:32:53 +03:00
char linkname [ PATH_MAX ] ;
char * build_id_filename ;
2017-03-27 10:10:36 +03:00
char * build_id_path = NULL ;
2017-06-08 10:31:01 +03:00
char * pos ;
2019-07-29 23:57:50 +03:00
int len ;
2011-02-04 14:45:46 +03:00
2016-08-09 20:56:13 +03:00
if ( dso - > symtab_type = = DSO_BINARY_TYPE__KALLSYMS & &
! dso__is_kcore ( dso ) )
2016-08-09 21:32:53 +03:00
return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX ;
2016-08-09 20:56:13 +03:00
2017-07-06 04:48:13 +03:00
build_id_filename = dso__build_id_filename ( dso , NULL , 0 , false ) ;
2016-08-09 21:32:53 +03:00
if ( build_id_filename ) {
__symbol__join_symfs ( filename , filename_size , build_id_filename ) ;
free ( build_id_filename ) ;
2016-08-09 21:16:37 +03:00
} else {
2016-07-29 22:27:18 +03:00
if ( dso - > has_build_id )
return ENOMEM ;
2011-02-04 14:45:46 +03:00
goto fallback ;
2016-08-09 21:16:37 +03:00
}
2017-03-27 10:10:36 +03:00
build_id_path = strdup ( filename ) ;
if ( ! build_id_path )
2019-09-30 21:53:33 +03:00
return ENOMEM ;
2017-03-27 10:10:36 +03:00
2017-06-08 10:31:01 +03:00
/*
* old style build - id cache has name of XX / XXXXXXX . . while
* new style has XX / XXXXXXX . . / { elf , kallsyms , vdso } .
* extract the build - id part of dirname in the new style only .
*/
pos = strrchr ( build_id_path , ' / ' ) ;
if ( pos & & strlen ( pos ) < SBUILD_ID_SIZE - 2 )
dirname ( build_id_path ) ;
2017-03-27 10:10:36 +03:00
2019-07-29 23:57:50 +03:00
if ( dso__is_kcore ( dso ) )
goto fallback ;
len = readlink ( build_id_path , linkname , sizeof ( linkname ) - 1 ) ;
if ( len < 0 )
goto fallback ;
linkname [ len ] = ' \0 ' ;
if ( strstr ( linkname , DSO__NAME_KALLSYMS ) | |
access ( filename , R_OK ) ) {
2011-02-04 14:45:46 +03:00
fallback :
/*
* If we don ' t have build - ids or the build - id file isn ' t in the
* cache , or is just a kallsyms file , well , let ' s hope that this
* DSO is the same as when ' perf record ' ran .
*/
2016-08-09 21:32:53 +03:00
__symbol__join_symfs ( filename , filename_size , dso - > long_name ) ;
2011-02-04 14:45:46 +03:00
}
2017-03-27 10:10:36 +03:00
free ( build_id_path ) ;
2016-08-09 21:32:53 +03:00
return 0 ;
}
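For illustration of the old/new build-id cache layouts handled above
(standalone sketch; the path and build-id are made up, and 39 stands in
for SBUILD_ID_SIZE - 2):
#include <libgen.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* new style ends in a short component such as /elf, which
	 * dirname() strips; an old style name ends in the long
	 * build-id component itself and is left alone */
	char path[] = "/root/.debug/.build-id/ab/cd0123456789abcdef0123456789abcdef0123/elf";
	char *pos = strrchr(path, '/');

	if (pos && strlen(pos) < 39)	/* SBUILD_ID_SIZE - 2 */
		dirname(path);		/* glibc: truncates in place */
	printf("%s\n", path);
	return 0;
}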
2019-03-12 08:30:48 +03:00
# if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
# define PACKAGE "perf"
# include <bfd.h>
# include <dis-asm.h>
2020-10-20 21:48:23 +03:00
# include <bpf/bpf.h>
# include <bpf/btf.h>
# include <bpf/libbpf.h>
# include <linux/btf.h>
2019-03-12 08:30:48 +03:00
static int symbol__disassemble_bpf ( struct symbol * sym ,
struct annotate_args * args )
{
struct annotation * notes = symbol__annotation ( sym ) ;
struct annotation_options * opts = args - > options ;
struct bpf_prog_linfo * prog_linfo = NULL ;
struct bpf_prog_info_node * info_node ;
int len = sym - > end - sym - > start ;
disassembler_ftype disassemble ;
struct map * map = args - > ms . map ;
2021-10-11 11:20:30 +03:00
struct perf_bpil * info_linear ;
	struct disassemble_info info;
	struct dso *dso = map->dso;
	int pc = 0, count, sub_id;
	struct btf *btf = NULL;
	char tpath[PATH_MAX];
	size_t buf_size;
	int nr_skip = 0;
	char *buf;
	bfd *bfdf;
	int ret;
	FILE *s;

	if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
		return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE;

	pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
		 sym->name, sym->start, sym->end - sym->start);
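
	/*
	 * libopcodes needs a bfd object to pick the disassembler target;
	 * perf_exe() fills tpath with the path of the running perf binary,
	 * which serves that purpose here.
	 */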
	memset(tpath, 0, sizeof(tpath));
	perf_exe(tpath, sizeof(tpath));

	bfdf = bfd_openr(tpath, NULL);
	assert(bfdf);
	assert(bfd_check_format(bfdf, bfd_object));
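
	/*
	 * Disassembly output is captured in an in-memory stream, so that
	 * buf/buf_size can later be parsed into per-instruction
	 * annotation lines.
	 */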
	s = open_memstream(&buf, &buf_size);
	if (!s) {
		ret = errno;
		goto out;
	}
perf annotate: Enable annotation of BPF programs
In symbol__disassemble(), DSO_BINARY_TYPE__BPF_PROG_INFO dso calls into
a new function symbol__disassemble_bpf(), where annotation line
information is filled based on the bpf_prog_info and btf data saved in
given perf_env.
symbol__disassemble_bpf() uses binutils's libopcodes to disassemble bpf
programs.
Committer testing:
After fixing this:
- u64 *addrs = (u64 *)(info_linear->info.jited_ksyms);
+ u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
Detected when crossbuilding to a 32-bit arch.
And making all this dependent on HAVE_LIBBFD_SUPPORT and
HAVE_LIBBPF_SUPPORT:
1) Have a BPF program running, one that has BTF info, etc, I used
the tools/perf/examples/bpf/augmented_raw_syscalls.c put in place
by 'perf trace'.
# grep -B1 augmented_raw ~/.perfconfig
[trace]
add_events = /home/acme/git/perf/tools/perf/examples/bpf/augmented_raw_syscalls.c
#
# perf trace -e *mmsg
dnf/6245 sendmmsg(20, 0x7f5485a88030, 2, MSG_NOSIGNAL) = 2
NetworkManager/10055 sendmmsg(22<socket:[1056822]>, 0x7f8126ad1bb0, 2, MSG_NOSIGNAL) = 2
2) Then do a 'perf record' system wide for a while:
# perf record -a
^C[ perf record: Woken up 68 times to write data ]
[ perf record: Captured and wrote 19.427 MB perf.data (366891 samples) ]
#
3) Check that we captured BPF and BTF info in the perf.data file:
# perf report --header-only | grep 'b[pt]f'
# event : name = cycles:ppp, , id = { 294789, 294790, 294791, 294792, 294793, 294794, 294795, 294796 }, size = 112, { sample_period, sample_freq } = 4000, sample_type = IP|TID|TIME|CPU|PERIOD, read_format = ID, disabled = 1, inherit = 1, mmap = 1, comm = 1, freq = 1, task = 1, precise_ip = 3, sample_id_all = 1, exclude_guest = 1, mmap2 = 1, comm_exec = 1, ksymbol = 1, bpf_event = 1
# bpf_prog_info of id 13
# bpf_prog_info of id 14
# bpf_prog_info of id 15
# bpf_prog_info of id 16
# bpf_prog_info of id 17
# bpf_prog_info of id 18
# bpf_prog_info of id 21
# bpf_prog_info of id 22
# bpf_prog_info of id 41
# bpf_prog_info of id 42
# btf info of id 2
#
4) Check which programs got recorded:
# perf report | grep bpf_prog | head
0.16% exe bpf_prog_819967866022f1e1_sys_enter [k] bpf_prog_819967866022f1e1_sys_enter
0.14% exe bpf_prog_c1bd85c092d6e4aa_sys_exit [k] bpf_prog_c1bd85c092d6e4aa_sys_exit
0.08% fuse-overlayfs bpf_prog_819967866022f1e1_sys_enter [k] bpf_prog_819967866022f1e1_sys_enter
0.07% fuse-overlayfs bpf_prog_c1bd85c092d6e4aa_sys_exit [k] bpf_prog_c1bd85c092d6e4aa_sys_exit
0.01% clang-4.0 bpf_prog_c1bd85c092d6e4aa_sys_exit [k] bpf_prog_c1bd85c092d6e4aa_sys_exit
0.01% clang-4.0 bpf_prog_819967866022f1e1_sys_enter [k] bpf_prog_819967866022f1e1_sys_enter
0.00% clang bpf_prog_c1bd85c092d6e4aa_sys_exit [k] bpf_prog_c1bd85c092d6e4aa_sys_exit
0.00% runc bpf_prog_819967866022f1e1_sys_enter [k] bpf_prog_819967866022f1e1_sys_enter
0.00% clang bpf_prog_819967866022f1e1_sys_enter [k] bpf_prog_819967866022f1e1_sys_enter
0.00% sh bpf_prog_c1bd85c092d6e4aa_sys_exit [k] bpf_prog_c1bd85c092d6e4aa_sys_exit
#
This was with the default --sort order for 'perf report', which is:
--sort comm,dso,symbol
If we just look for the symbol, for instance:
# perf report --sort symbol | grep bpf_prog | head
0.26% [k] bpf_prog_819967866022f1e1_sys_enter - -
0.24% [k] bpf_prog_c1bd85c092d6e4aa_sys_exit - -
#
or the DSO:
# perf report --sort dso | grep bpf_prog | head
0.26% bpf_prog_819967866022f1e1_sys_enter
0.24% bpf_prog_c1bd85c092d6e4aa_sys_exit
#
We'll see the two BPF programs that augmented_raw_syscalls.o puts in
place, one attached to the raw_syscalls:sys_enter tracepoint and the
other to raw_syscalls:sys_exit, as expected.
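For orientation, the rough shape of such an object file — a hypothetical
minimal sketch, not the actual augmented_raw_syscalls.c — is two functions
placed in their own ELF sections, which the kernel JITs into the two
bpf_prog_<tag>_<name> symbols seen above:

/* sketch.bpf.c (hypothetical); build with: clang -target bpf -O2 -c sketch.bpf.c */
#define SEC(name) __attribute__((section(name), used))

SEC("raw_syscalls:sys_enter")
int sys_enter(void *ctx)
{
	return 0;	/* the real program filters pids, copies args, etc. */
}

SEC("raw_syscalls:sys_exit")
int sys_exit(void *ctx)
{
	return 0;
}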
Now we can finally annotate, from the command line, one of those two
symbols, with the original BPF program source code intermixed with the
disassembled JITed code:
# perf annotate --stdio2 bpf_prog_819967866022f1e1_sys_enter
Samples: 950 of event 'cycles:ppp', 4000 Hz, Event count (approx.): 553756947, [percent: local period]
bpf_prog_819967866022f1e1_sys_enter() bpf_prog_819967866022f1e1_sys_enter
Percent int sys_enter(struct syscall_enter_args *args)
53.41 push %rbp
0.63 mov %rsp,%rbp
0.31 sub $0x170,%rsp
1.93 sub $0x28,%rbp
7.02 mov %rbx,0x0(%rbp)
3.20 mov %r13,0x8(%rbp)
1.07 mov %r14,0x10(%rbp)
0.61 mov %r15,0x18(%rbp)
0.11 xor %eax,%eax
1.29 mov %rax,0x20(%rbp)
0.11 mov %rdi,%rbx
return bpf_get_current_pid_tgid();
2.02 → callq *ffffffffda6776d9
2.76 mov %eax,-0x148(%rbp)
mov %rbp,%rsi
int sys_enter(struct syscall_enter_args *args)
add $0xfffffffffffffeb8,%rsi
return bpf_map_lookup_elem(pids, &pid) != NULL;
movabs $0xffff975ac2607800,%rdi
1.26 → callq *ffffffffda6789e9
cmp $0x0,%rax
2.43 → je 0
add $0x38,%rax
0.21 xor %r13d,%r13d
if (pid_filter__has(&pids_filtered, getpid()))
0.81 cmp $0x0,%rax
→ jne 0
mov %rbp,%rdi
probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
2.22 add $0xfffffffffffffeb8,%rdi
0.11 mov $0x40,%esi
0.32 mov %rbx,%rdx
2.74 → callq *ffffffffda658409
syscall = bpf_map_lookup_elem(&syscalls, &augmented_args.args.syscall_nr);
0.22 mov %rbp,%rsi
1.69 add $0xfffffffffffffec0,%rsi
syscall = bpf_map_lookup_elem(&syscalls, &augmented_args.args.syscall_nr);
movabs $0xffff975bfcd36000,%rdi
add $0xd0,%rdi
0.21 mov 0x0(%rsi),%eax
0.93 cmp $0x200,%rax
→ jae 0
0.10 shl $0x3,%rax
0.11 add %rdi,%rax
0.11 → jmp 0
xor %eax,%eax
if (syscall == NULL || !syscall->enabled)
1.07 cmp $0x0,%rax
→ je 0
if (syscall == NULL || !syscall->enabled)
6.57 movzbq 0x0(%rax),%rdi
if (syscall == NULL || !syscall->enabled)
cmp $0x0,%rdi
0.95 → je 0
mov $0x40,%r8d
switch (augmented_args.args.syscall_nr) {
mov -0x140(%rbp),%rdi
switch (augmented_args.args.syscall_nr) {
cmp $0x2,%rdi
→ je 0
cmp $0x101,%rdi
→ je 0
cmp $0x15,%rdi
→ jne 0
case SYS_OPEN: filename_arg = (const void *)args->args[0];
mov 0x10(%rbx),%rdx
→ jmp 0
case SYS_OPENAT: filename_arg = (const void *)args->args[1];
mov 0x18(%rbx),%rdx
if (filename_arg != NULL) {
cmp $0x0,%rdx
→ je 0
xor %edi,%edi
augmented_args.filename.reserved = 0;
mov %edi,-0x104(%rbp)
augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
mov %rbp,%rdi
add $0xffffffffffffff00,%rdi
augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
mov $0x100,%esi
→ callq *ffffffffda658499
mov $0x148,%r8d
augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
mov %eax,-0x108(%rbp)
augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
mov %rax,%rdi
shl $0x20,%rdi
shr $0x20,%rdi
if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) {
cmp $0xff,%rdi
→ ja 0
len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size;
add $0x48,%rax
len &= sizeof(augmented_args.filename.value) - 1;
and $0xff,%rax
mov %rax,%r8
mov %rbp,%rcx
return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
add $0xfffffffffffffeb8,%rcx
mov %rbx,%rdi
movabs $0xffff975fbd72d800,%rsi
mov $0xffffffff,%edx
→ callq *ffffffffda658ad9
mov %rax,%r13
}
mov %r13,%rax
0.72 mov 0x0(%rbp),%rbx
mov 0x8(%rbp),%r13
1.16 mov 0x10(%rbp),%r14
0.10 mov 0x18(%rbp),%r15
0.42 add $0x28,%rbp
0.54 leaveq
0.54 ← retq
#
Please see 'man perf-config' for how to control what is shown via the
~/.perfconfig [annotate] section; for instance, one can suppress the
source code and see just the disassembly.
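For example, assuming the annotate.* keys documented in 'man perf-config',
a minimal ~/.perfconfig stanza to hide the source code and keep raw
offsets might look like:

# ~/.perfconfig
[annotate]
	hide_src_code = true
	use_offset = true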
Alternatively, use the TUI by just running 'perf annotate', press
'/bpf_prog' to see the BPF symbols, press Enter and do the interactive
annotation, which allows dumping to a file after selecting the various
output tunables, for instance, the above without source code
intermixed, plus showing all the instruction offsets:
# perf annotate bpf_prog_819967866022f1e1_sys_enter
Then press 's' to hide the source code, 'O' twice to show all
instruction offsets, then 'P' to print to the
bpf_prog_819967866022f1e1_sys_enter.annotation file, which will have:
# cat bpf_prog_819967866022f1e1_sys_enter.annotation
bpf_prog_819967866022f1e1_sys_enter() bpf_prog_819967866022f1e1_sys_enter
Event: cycles:ppp
53.41 0: push %rbp
0.63 1: mov %rsp,%rbp
0.31 4: sub $0x170,%rsp
1.93 b: sub $0x28,%rbp
7.02 f: mov %rbx,0x0(%rbp)
3.20 13: mov %r13,0x8(%rbp)
1.07 17: mov %r14,0x10(%rbp)
0.61 1b: mov %r15,0x18(%rbp)
0.11 1f: xor %eax,%eax
1.29 21: mov %rax,0x20(%rbp)
0.11 25: mov %rdi,%rbx
2.02 28: → callq *ffffffffda6776d9
2.76 2d: mov %eax,-0x148(%rbp)
33: mov %rbp,%rsi
36: add $0xfffffffffffffeb8,%rsi
3d: movabs $0xffff975ac2607800,%rdi
1.26 47: → callq *ffffffffda6789e9
4c: cmp $0x0,%rax
2.43 50: → je 0
52: add $0x38,%rax
0.21 56: xor %r13d,%r13d
0.81 59: cmp $0x0,%rax
5d: → jne 0
63: mov %rbp,%rdi
2.22 66: add $0xfffffffffffffeb8,%rdi
0.11 6d: mov $0x40,%esi
0.32 72: mov %rbx,%rdx
2.74 75: → callq *ffffffffda658409
0.22 7a: mov %rbp,%rsi
1.69 7d: add $0xfffffffffffffec0,%rsi
84: movabs $0xffff975bfcd36000,%rdi
8e: add $0xd0,%rdi
0.21 95: mov 0x0(%rsi),%eax
0.93 98: cmp $0x200,%rax
9f: → jae 0
0.10 a1: shl $0x3,%rax
0.11 a5: add %rdi,%rax
0.11 a8: → jmp 0
aa: xor %eax,%eax
1.07 ac: cmp $0x0,%rax
b0: → je 0
6.57 b6: movzbq 0x0(%rax),%rdi
bb: cmp $0x0,%rdi
0.95 bf: → je 0
c5: mov $0x40,%r8d
cb: mov -0x140(%rbp),%rdi
d2: cmp $0x2,%rdi
d6: → je 0
d8: cmp $0x101,%rdi
df: → je 0
e1: cmp $0x15,%rdi
e5: → jne 0
e7: mov 0x10(%rbx),%rdx
eb: → jmp 0
ed: mov 0x18(%rbx),%rdx
f1: cmp $0x0,%rdx
f5: → je 0
f7: xor %edi,%edi
f9: mov %edi,-0x104(%rbp)
ff: mov %rbp,%rdi
102: add $0xffffffffffffff00,%rdi
109: mov $0x100,%esi
10e: → callq *ffffffffda658499
113: mov $0x148,%r8d
119: mov %eax,-0x108(%rbp)
11f: mov %rax,%rdi
122: shl $0x20,%rdi
126: shr $0x20,%rdi
12a: cmp $0xff,%rdi
131: → ja 0
133: add $0x48,%rax
137: and $0xff,%rax
13d: mov %rax,%r8
140: mov %rbp,%rcx
143: add $0xfffffffffffffeb8,%rcx
14a: mov %rbx,%rdi
14d: movabs $0xffff975fbd72d800,%rsi
157: mov $0xffffffff,%edx
15c: → callq *ffffffffda658ad9
161: mov %rax,%r13
164: mov %r13,%rax
0.72 167: mov 0x0(%rbp),%rbx
16b: mov 0x8(%rbp),%r13
1.16 16f: mov 0x10(%rbp),%r14
0.10 173: mov 0x18(%rbp),%r15
0.42 177: add $0x28,%rbp
0.54 17b: leaveq
0.54 17c: ← retq
Another cool way to test all this is to simply use 'perf top', look for
those symbols, go there and press Enter, and annotate it live :-)
Signed-off-by: Song Liu <songliubraving@fb.com>
Reviewed-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stanislav Fomichev <sdf@google.com>
Link: http://lkml.kernel.org/r/20190312053051.2690567-13-songliubraving@fb.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-03-12 08:30:48 +03:00
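In outline, symbol__disassemble_bpf() proceeds roughly as follows — a
sketch of the hunks below, not extra code:

/*
 * 1. init_disassemble_info() wires libopcodes' output to the FILE
 *    stream s and records arch/mach from the bfd handle;
 * 2. perf_env__find_bpf_prog_info() fetches the bpf_prog_info saved at
 *    record time; if absent, fail with
 *    SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
 * 3. info.buffer is pointed at the JITed instructions, and
 *    bpf_prog_linfo/btf handles are built for source-line lookup;
 * 4. a do-loop disassembles one instruction at a time, matching each
 *    address against the BPF line info to interleave source lines.
 */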
	/* Route libopcodes' disassembly output through the FILE stream s */
	init_disassemble_info(&info, s,
			      (fprintf_ftype) fprintf);
	info.arch = bfd_get_arch(bfdf);
	info.mach = bfd_get_mach(bfdf);

	info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
						 dso->bpf_prog.id);
2019-09-30 22:04:21 +03:00
	if (!info_node) {
2019-10-14 20:10:47 +03:00
		ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
2019-03-12 08:30:48 +03:00
		goto out;
2019-09-30 22:04:21 +03:00
	}
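	/*
	 * From here on, info_node->info_linear is the bpf_prog_info
	 * (with its variable-length arrays appended) that was saved
	 * into the perf_env when the BPF events were recorded.
	 */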
2019-03-12 08:30:48 +03:00
	info_linear = info_node->info_linear;
	sub_id = dso->bpf_prog.sub_id;
2019-04-03 22:44:52 +03:00
	info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns);
2019-03-12 08:30:48 +03:00
	info.buffer_length = info_linear->info.jited_prog_len;

	if (info_linear->info.nr_line_info)
		prog_linfo = bpf_prog_linfo__new(&info_linear->info);

	if (info_linear->info.btf_id) {
		struct btf_node *node;

		node = perf_env__find_btf(dso->bpf_prog.env,
					  info_linear->info.btf_id);
		if (node)
			btf = btf__new((__u8 *)(node->data),
				       node->data_size);
	}

	disassemble_init_for_target(&info);

	/* binutils >= 2.29 takes four arguments in disassembler() */
#ifdef DISASM_FOUR_ARGS_SIGNATURE
	disassemble = disassembler(info.arch,
				   bfd_big_endian(bfdf),
				   info.mach,
				   bfdf);
#else
	disassemble = disassembler(bfdf);
#endif
	assert(disassemble);

	fflush(s);
	do {
		const struct bpf_line_info *linfo = NULL;
		struct disasm_line *dl;
		size_t prev_buf_size;
		const char *srcline;
		u64 addr;
2019-04-03 22:44:52 +03:00
		/* pc is an offset into the JITed image; jited_ksyms[sub_id] is its kernel load address */
		addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id];
2019-03-12 08:30:48 +03:00
		count = disassemble(pc, &info);
		if (prog_linfo)
			linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
								addr, sub_id,
								nr_skip);
		if (linfo && btf) {
			srcline = btf__name_by_offset(btf, linfo->line_off);
			nr_skip++;
		} else
			srcline = NULL;

		fprintf(s, "\n");
		prev_buf_size = buf_size;
		fflush(s);

		if (!opts->hide_src_code && srcline) {
			/* offset == -1 marks a source line, not an instruction */
			args->offset = -1;
			args->line = strdup(srcline);
			args->line_nr = 0;
2021-02-15 14:34:46 +03:00
			args->fileloc = NULL;
36: add $0xfffffffffffffeb8,%rsi
3d: movabs $0xffff975ac2607800,%rdi
1.26 47: → callq *ffffffffda6789e9
4c: cmp $0x0,%rax
2.43 50: → je 0
52: add $0x38,%rax
0.21 56: xor %r13d,%r13d
0.81 59: cmp $0x0,%rax
5d: → jne 0
63: mov %rbp,%rdi
2.22 66: add $0xfffffffffffffeb8,%rdi
0.11 6d: mov $0x40,%esi
0.32 72: mov %rbx,%rdx
2.74 75: → callq *ffffffffda658409
0.22 7a: mov %rbp,%rsi
1.69 7d: add $0xfffffffffffffec0,%rsi
84: movabs $0xffff975bfcd36000,%rdi
8e: add $0xd0,%rdi
0.21 95: mov 0x0(%rsi),%eax
0.93 98: cmp $0x200,%rax
9f: → jae 0
0.10 a1: shl $0x3,%rax
0.11 a5: add %rdi,%rax
0.11 a8: → jmp 0
aa: xor %eax,%eax
1.07 ac: cmp $0x0,%rax
b0: → je 0
6.57 b6: movzbq 0x0(%rax),%rdi
bb: cmp $0x0,%rdi
0.95 bf: → je 0
c5: mov $0x40,%r8d
cb: mov -0x140(%rbp),%rdi
d2: cmp $0x2,%rdi
d6: → je 0
d8: cmp $0x101,%rdi
df: → je 0
e1: cmp $0x15,%rdi
e5: → jne 0
e7: mov 0x10(%rbx),%rdx
eb: → jmp 0
ed: mov 0x18(%rbx),%rdx
f1: cmp $0x0,%rdx
f5: → je 0
f7: xor %edi,%edi
f9: mov %edi,-0x104(%rbp)
ff: mov %rbp,%rdi
102: add $0xffffffffffffff00,%rdi
109: mov $0x100,%esi
10e: → callq *ffffffffda658499
113: mov $0x148,%r8d
119: mov %eax,-0x108(%rbp)
11f: mov %rax,%rdi
122: shl $0x20,%rdi
126: shr $0x20,%rdi
12a: cmp $0xff,%rdi
131: → ja 0
133: add $0x48,%rax
137: and $0xff,%rax
13d: mov %rax,%r8
140: mov %rbp,%rcx
143: add $0xfffffffffffffeb8,%rcx
14a: mov %rbx,%rdi
14d: movabs $0xffff975fbd72d800,%rsi
157: mov $0xffffffff,%edx
15c: → callq *ffffffffda658ad9
161: mov %rax,%r13
164: mov %r13,%rax
0.72 167: mov 0x0(%rbp),%rbx
16b: mov 0x8(%rbp),%r13
1.16 16f: mov 0x10(%rbp),%r14
0.10 173: mov 0x18(%rbp),%r15
0.42 177: add $0x28,%rbp
0.54 17b: leaveq
0.54 17c: ← retq
Another cool way to test all this is to symple use 'perf top' look for
those symbols, go there and press enter, annotate it live :-)
Signed-off-by: Song Liu <songliubraving@fb.com>
Reviewed-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stanislav Fomichev <sdf@google.com>
Link: http://lkml.kernel.org/r/20190312053051.2690567-13-songliubraving@fb.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-03-12 08:30:48 +03:00
			args->ms.sym = sym;
			dl = disasm_line__new(args);
			if (dl) {
				annotation_line__add(&dl->al,
						     &notes->src->source);
			}
		}
		args->offset = pc;
		args->line = buf + prev_buf_size;
		args->line_nr = 0;
2021-02-15 14:34:46 +03:00
		args->fileloc = NULL;
2019-03-12 08:30:48 +03:00
		args->ms.sym = sym;
		dl = disasm_line__new(args);
		if (dl)
			annotation_line__add(&dl->al, &notes->src->source);

		pc += count;
	} while (count > 0 && pc < len);

	ret = 0;
out:
	free(prog_linfo);
2021-08-26 21:48:33 +03:00
	btf__free(btf);
2019-03-12 08:30:48 +03:00
	fclose(s);
	bfd_close(bfdf);
	return ret;
}
#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused,
				   struct annotate_args *args __maybe_unused)
{
	return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
}
#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
2020-03-12 22:56:10 +03:00
static int
symbol__disassemble_bpf_image(struct symbol *sym,
			      struct annotate_args *args)
{
	struct annotation *notes = symbol__annotation(sym);
	struct disasm_line *dl;

	args->offset = -1;
	args->line = strdup("to be implemented");
	args->line_nr = 0;
2021-02-15 14:34:46 +03:00
	args->fileloc = NULL;
2020-03-12 22:56:10 +03:00
	dl = disasm_line__new(args);
	if (dl)
		annotation_line__add(&dl->al, &notes->src->source);

	free(args->line);	/* disasm_line__new() duplicated the line */
	return 0;
}
2019-10-10 21:36:48 +03:00
/*
 * Possibly create a new version of line with tabs expanded. Returns the
 * existing or new line, storage is updated if a new line is allocated. If
 * allocation fails then NULL is returned.
 */
static char *expand_tabs(char *line, char **storage, size_t *storage_len)
{
	size_t i, src, dst, len, new_storage_len, num_tabs;
	char *new_line;
	size_t line_len = strlen(line);

	for (num_tabs = 0, i = 0; i < line_len; i++)
		if (line[i] == '\t')
			num_tabs++;

	if (num_tabs == 0)
		return line;

	/*
	 * Space for the line and '\0', less the leading and trailing
	 * spaces. Each tab may introduce 7 additional spaces.
	 */
	new_storage_len = line_len + 1 + (num_tabs * 7);

	new_line = malloc(new_storage_len);
	if (new_line == NULL) {
		pr_err("Failure allocating memory for tab expansion\n");
		return NULL;
	}

	/*
	 * Copy regions starting at src and expand tabs. If there are two
	 * adjacent tabs then 'src == i', the memcpy is of size 0 and the spaces
	 * are inserted.
	 */
	for (i = 0, src = 0, dst = 0; i < line_len && num_tabs; i++) {
		if (line[i] == '\t') {
			len = i - src;
			memcpy(&new_line[dst], &line[src], len);
			dst += len;
			new_line[dst++] = ' ';
			while (dst % 8 != 0)
				new_line[dst++] = ' ';
			src = i + 1;
			num_tabs--;
		}
	}
/* Expand the last region. */
2019-10-26 06:56:44 +03:00
	len = line_len - src;
2019-10-10 21:36:48 +03:00
	memcpy(&new_line[dst], &line[src], len);
	dst += len;
	new_line[dst] = '\0';

	free(*storage);
	*storage = new_line;
	*storage_len = new_storage_len;
	return new_line;
}
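/*
 * A minimal sketch (hypothetical helper, not part of this file) of how a
 * caller might drive expand_tabs() over getline() storage; it passes the
 * getline() buffer as the storage so expand_tabs() can free and replace it:
 */
static void example_read_expanded(FILE *file)
{
	char *line = NULL;
	size_t line_len = 0;

	while (getline(&line, &line_len, file) > 0) {
		char *expanded = expand_tabs(line, &line, &line_len);

		if (expanded == NULL)
			break;	/* allocation failed, 'line' is still valid */
		/* ... consume the tab-free 'expanded' line ... */
	}
	free(line);
}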
2017-10-11 18:01:31 +03:00
static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
2016-08-09 21:32:53 +03:00
{
2018-05-28 17:42:59 +03:00
	struct annotation_options *opts = args->options;
2018-03-20 22:19:08 +03:00
	struct map *map = args->ms.map;
2016-08-09 21:32:53 +03:00
	struct dso *dso = map->dso;
perf annotate: Use asprintf when formatting objdump command line
We were using a local buffer with an arbitrary size, which would have to
keep being increased to avoid truncation, as warned by gcc 8:
util/annotate.c: In function 'symbol__disassemble':
util/annotate.c:1488:4: error: '%s' directive output may be truncated writing up to 4095 bytes into a region of size between 3966 and 8086 [-Werror=format-truncation=]
"%s %s%s --start-address=0x%016" PRIx64
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
util/annotate.c:1498:20:
symfs_filename, symfs_filename);
~~~~~~~~~~~~~~
util/annotate.c:1490:50: note: format string is defined here
" -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
^~
In file included from /usr/include/stdio.h:861,
from util/color.h:5,
from util/sort.h:8,
from util/annotate.c:14:
/usr/include/bits/stdio2.h:67:10: note: '__builtin___snprintf_chk' output 116 or more bytes (assuming 8331) into a destination of size 8192
return __builtin___snprintf_chk (__s, __n, __USE_FORTIFY_LEVEL - 1,
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__bos (__s), __fmt, __va_arg_pack ());
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
So switch to asprintf(), which will make sure enough space is available.
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: https://lkml.kernel.org/n/tip-qagoy2dmbjpc9gdnaj0r3mml@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-03-14 16:34:11 +03:00
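	/*
	 * The asprintf() pattern described above, as a minimal sketch
	 * (hypothetical names, not code from this file):
	 *
	 *	char *cmd = NULL;
	 *
	 *	if (asprintf(&cmd, "%s -l -d \"%s\"", objdump_path, filename) < 0)
	 *		return -1;	// contents of cmd are undefined on failure
	 *	...
	 *	free(cmd);		// the caller owns the allocated buffer
	 */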
	char *command;
2016-08-09 21:32:53 +03:00
	FILE *file;
	char symfs_filename[PATH_MAX];
	struct kcore_extract kce;
	bool delete_extract = false;
2018-08-17 12:48:02 +03:00
	bool decomp = false;
2016-08-09 21:32:53 +03:00
	int lineno = 0;
2021-02-15 14:34:46 +03:00
	char *fileloc = NULL;
2016-08-09 21:32:53 +03:00
	int nline;
2019-10-10 21:36:45 +03:00
	char *line;
	size_t line_len;
2019-10-10 21:36:46 +03:00
	const char *objdump_argv[] = {
		"/bin/sh",
		"-c",
		NULL, /* Will be the objdump command to run. */
		"--",
		NULL, /* Will be the symfs path. */
		NULL,
	};
	struct child_process objdump_process;
2016-08-09 21:32:53 +03:00
	int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename));

	if (err)
		return err;
2011-02-04 14:45:46 +03:00
pr_debug ( " %s: filename=%s, sym=%s, start=%# " PRIx64 " , end=%# " PRIx64 " \n " , __func__ ,
2016-08-09 21:16:37 +03:00
		 symfs_filename, sym->name, map->unmap_ip(map, sym->start),
2011-02-04 14:45:46 +03:00
		 map->unmap_ip(map, sym->end));

	pr_debug("annotating [%p] %30s : [%p] %30s\n",
		 dso, dso->long_name, sym, sym->name);
2019-03-12 08:30:48 +03:00
	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) {
		return symbol__disassemble_bpf(sym, args);
2020-03-12 22:56:10 +03:00
	} else if (dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE) {
		return symbol__disassemble_bpf_image(sym, args);
2019-03-12 08:30:48 +03:00
	} else if (dso__is_kcore(dso)) {
2013-10-09 16:01:12 +04:00
		kce.kcore_filename = symfs_filename;
		kce.addr = map__rip_2objdump(map, sym->start);
		kce.offs = sym->start;
2014-10-15 00:19:44 +04:00
		kce.len = sym->end - sym->start;
2013-10-09 16:01:12 +04:00
		if (!kcore_extract__create(&kce)) {
			delete_extract = true;
			strlcpy(symfs_filename, kce.extract_filename,
				sizeof(symfs_filename));
		}
2015-03-02 20:56:12 +03:00
	} else if (dso__needs_decompress(dso)) {
2017-06-08 10:31:04 +03:00
		char tmp[KMOD_DECOMP_LEN];
2015-03-02 20:56:12 +03:00
2017-06-08 10:31:04 +03:00
		if (dso__decompress_kmodule_path(dso, symfs_filename,
						 tmp, sizeof(tmp)) < 0)
2019-10-10 21:36:46 +03:00
			return -1;
2015-03-02 20:56:12 +03:00
2018-08-17 12:48:02 +03:00
		decomp = true;
2015-03-02 20:56:12 +03:00
		strcpy(symfs_filename, tmp);
2013-10-09 16:01:12 +04:00
}
perf annotate: Use asprintf when formatting objdump command line
We were using a local buffer with an arbitrary size, that would have to
get increased to avoid truncation as warned by gcc 8:
util/annotate.c: In function 'symbol__disassemble':
util/annotate.c:1488:4: error: '%s' directive output may be truncated writing up to 4095 bytes into a region of size between 3966 and 8086 [-Werror=format-truncation=]
"%s %s%s --start-address=0x%016" PRIx64
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
util/annotate.c:1498:20:
symfs_filename, symfs_filename);
~~~~~~~~~~~~~~
util/annotate.c:1490:50: note: format string is defined here
" -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
^~
In file included from /usr/include/stdio.h:861,
from util/color.h:5,
from util/sort.h:8,
from util/annotate.c:14:
/usr/include/bits/stdio2.h:67:10: note: '__builtin___snprintf_chk' output 116 or more bytes (assuming 8331) into a destination of size 8192
return __builtin___snprintf_chk (__s, __n, __USE_FORTIFY_LEVEL - 1,
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__bos (__s), __fmt, __va_arg_pack ());
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
So switch to asprintf, which will make sure enough space is available.
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: https://lkml.kernel.org/n/tip-qagoy2dmbjpc9gdnaj0r3mml@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-03-14 16:34:11 +03:00
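A minimal standalone sketch of the pattern the commit above describes: let
asprintf() size the buffer instead of guessing with snprintf(). The binary
path and addresses below are placeholders, not values perf actually uses:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <stdint.h>

int main(void)
{
	char *command = NULL;
	uint64_t start = 0x48a971, end = 0x48a9d0;

	/* asprintf() allocates exactly as much as the expansion needs */
	if (asprintf(&command,
		     "objdump --start-address=0x%016" PRIx64
		     " --stop-address=0x%016" PRIx64 " -l -d %s",
		     start, end, "/tmp/example.bin") < 0)
		return 1;	/* allocation failed; command is undefined */

	printf("%s\n", command);
	free(command);
	return 0;
}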
err = asprintf(&command,
2012-09-04 14:32:30 +04:00
"%s %s%s --start-address=0x%016" PRIx64
2011-05-17 19:32:07 +04:00
" --stop-address=0x%016" PRIx64
2020-01-08 00:04:44 +03:00
" -l -d %s %s %s %c%s%c %s%s -C \"$1\"",
2018-05-28 20:24:45 +03:00
opts->objdump_path ?: "objdump",
2018-05-28 17:50:21 +03:00
opts->disassembler_style ? "-M " : "",
opts->disassembler_style ?: "",
2011-02-04 14:45:46 +03:00
map__rip_2objdump(map, sym->start),
2014-10-15 00:19:44 +04:00
map__rip_2objdump(map, sym->end),
2019-10-10 21:36:49 +03:00
opts->show_asm_raw ? "" : "--no-show-raw-insn",
2020-01-08 00:04:44 +03:00
opts->annotate_src ? "-S" : "",
opts->prefix ? "--prefix " : "",
opts->prefix ? '"' : ' ',
opts->prefix ?: "",
opts->prefix ? '"' : ' ',
opts->prefix_strip ? "--prefix-strip=" : "",
opts->prefix_strip ?: "");
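The bare "?:" in the argument list above is GNU C's conditional with an
omitted middle operand: "x ?: y" yields x when x is non-NULL/non-zero and y
otherwise, evaluating x only once. A tiny standalone illustration:

#include <stdio.h>

int main(void)
{
	const char *objdump_path = NULL;	/* pretend the option is unset */

	printf("%s\n", objdump_path ?: "objdump");	/* falls back to "objdump" */
	return 0;
}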
2011-02-04 14:45:46 +03:00
2018-03-14 16:34:11 +03:00
if (err < 0) {
pr_err("Failure allocating memory for the command to run\n");
goto out_remove_tmp;
}
2011-02-04 14:45:46 +03:00
pr_debug("Executing: %s\n", command);
2019-10-10 21:36:46 +03:00
objdump_argv[2] = command;
objdump_argv[4] = symfs_filename;
2016-06-15 21:48:08 +03:00
2019-10-10 21:36:46 +03:00
/* Create a pipe to read from for stdout */
memset(&objdump_process, 0, sizeof(objdump_process));
objdump_process.argv = objdump_argv;
objdump_process.out = -1;
if (start_command(&objdump_process)) {
pr_err("Failure starting to run %s\n", command);
err = -1;
goto out_free_command;
2016-06-15 21:48:08 +03:00
}
2019-10-10 21:36:46 +03:00
file = fdopen(objdump_process.out, "r");
2015-11-06 06:06:07 +03:00
if (!file) {
2016-06-15 21:48:08 +03:00
pr_err("Failure creating FILE stream for %s\n", command);
2015-11-06 06:06:07 +03:00
/*
* If we were using debug info, we should retry with
* the original binary.
*/
2019-10-10 21:36:46 +03:00
err = -1;
goto out_close_stdout;
2015-11-06 06:06:07 +03:00
}
2011-02-04 14:45:46 +03:00
2019-10-10 21:36:45 +03:00
/* Storage for getline. */
line = NULL;
line_len = 0;
2015-11-06 06:06:07 +03:00
nline = 0;
while (!feof(file)) {
2019-10-10 21:36:47 +03:00
const char *match;
2019-10-10 21:36:48 +03:00
char *expanded_line;
2019-10-10 21:36:47 +03:00
2019-10-10 21:36:45 +03:00
if (getline(&line, &line_len, file) < 0 || !line)
break;
2019-10-10 21:36:47 +03:00
/* Skip lines containing "filename:" */
match = strstr(line, symfs_filename);
if (match && match[strlen(symfs_filename)] == ':')
continue;
2019-10-10 21:36:48 +03:00
expanded_line = strim(line);
expanded_line = expand_tabs(expanded_line, &line, &line_len);
if (!expanded_line)
break;
2017-03-21 22:00:50 +03:00
/*
* The source code line number (lineno) needs to be kept
2018-12-03 13:22:00 +03:00
* across calls to symbol__parse_objdump_line(), so that it
2017-03-21 22:00:50 +03:00
* can be associated with the instructions until the next one.
* See disasm_line__new() and struct disasm_line::line_nr.
*/
2019-10-10 21:36:48 +03:00
if (symbol__parse_objdump_line(sym, args, expanded_line,
2021-02-15 14:34:46 +03:00
&lineno, &fileloc) < 0)
2011-02-04 14:45:46 +03:00
break;
2015-11-06 06:06:07 +03:00
nline++;
}
2019-10-10 21:36:45 +03:00
free(line);
2015-11-06 06:06:07 +03:00
2019-10-10 21:36:46 +03:00
err = finish_command(&objdump_process);
if (err)
pr_err("Error running %s\n", command);
if (nline == 0) {
err = -1;
2015-11-06 06:06:07 +03:00
pr_err("No output from %s\n", command);
2019-10-10 21:36:46 +03:00
}
2011-02-04 14:45:46 +03:00
2013-08-07 15:38:56 +04:00
/*
* kallsyms does not have symbol sizes, so there may be a nop at the end.
* Remove it.
*/
if (dso__is_kcore(dso))
delete_last_nop(sym);
2016-06-15 21:48:08 +03:00
fclose(file);
2019-10-10 21:36:46 +03:00
out_close_stdout:
close(objdump_process.out);
2018-03-14 16:34:11 +03:00
out_free_command:
free(command);
2016-06-15 21:48:08 +03:00
2019-10-10 21:36:46 +03:00
out_remove_tmp:
2018-08-17 12:48:02 +03:00
if (decomp)
2015-03-02 20:56:12 +03:00
unlink(symfs_filename);
2016-08-09 21:16:37 +03:00
2013-10-09 16:01:12 +04:00
if (delete_extract)
kcore_extract__delete(&kce);
2016-06-15 21:48:08 +03:00
2019-10-10 21:36:46 +03:00
return err;
2011-02-04 14:45:46 +03:00
}
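The function above drives objdump through perf's internal
start_command()/finish_command() helpers and reads the child's stdout line
by line. A rough standalone sketch of that read loop, with POSIX popen(3)
standing in for perf's run-command API and an echo command standing in for
objdump:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *file = popen("echo 'line one'; echo 'line two'", "r");
	char *line = NULL;
	size_t line_len = 0;
	int nline = 0;

	if (!file)
		return 1;

	/* getline() grows the buffer as needed, like the loop above */
	while (getline(&line, &line_len, file) >= 0)
		nline++;	/* a real parser would handle the line here */

	free(line);
	printf("read %d lines\n", nline);
	return pclose(file) == 0 ? 0 : 1;
}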
2018-08-04 16:05:07 +03:00
static void calc_percent(struct sym_hist *sym_hist,
2018-08-04 16:05:10 +03:00
struct hists *hists,
2018-08-04 16:05:06 +03:00
struct annotation_data *data,
2017-10-11 18:01:40 +03:00
s64 offset, s64 end)
{
unsigned int hits = 0;
u64 period = 0;
while (offset < end) {
2018-08-04 16:05:07 +03:00
hits += sym_hist->addr[offset].nr_samples;
period += sym_hist->addr[offset].period;
2017-10-11 18:01:40 +03:00
++offset;
}
2018-08-04 16:05:07 +03:00
if (sym_hist->nr_samples) {
2018-08-04 16:05:06 +03:00
data->he.period = period;
data->he.nr_samples = hits;
2018-08-04 16:05:09 +03:00
data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples;
2017-10-11 18:01:40 +03:00
}
2018-08-04 16:05:10 +03:00
if (hists->stats.nr_non_filtered_samples)
data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples;
2018-08-04 16:05:11 +03:00
if (sym_hist->period)
data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period;
2018-08-04 16:05:12 +03:00
if (hists->stats.total_period)
data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period;
2017-10-11 18:01:40 +03:00
}
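A hedged numeric sketch of the local vs. global percent flavours computed
above; all sample counts below are made up, not taken from a real profile:

#include <stdio.h>

int main(void)
{
	unsigned int hits = 50;		  /* samples in [offset, end) */
	unsigned int sym_samples = 200;	  /* sym_hist->nr_samples */
	unsigned int all_samples = 10000; /* hists->stats.nr_non_filtered_samples */

	printf("local hits:  %.2f%%\n", 100.0 * hits / sym_samples); /* 25.00 */
	printf("global hits: %.2f%%\n", 100.0 * hits / all_samples); /* 0.50 */
	return 0;
}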
2017-11-15 14:05:59 +03:00
static void annotation__calc_percent(struct annotation *notes,
2019-07-21 14:23:51 +03:00
struct evsel *leader, s64 len)
2017-10-11 18:01:40 +03:00
{
struct annotation_line *al, *next;
2019-07-21 14:23:51 +03:00
struct evsel *evsel;
2017-10-11 18:01:40 +03:00
list_for_each_entry(al, &notes->src->source, node) {
s64 end;
2018-08-04 16:05:08 +03:00
int i = 0;
2017-10-11 18:01:40 +03:00
if (al->offset == -1)
continue;
next = annotation_line__next(al, &notes->src->source);
end = next ? next->offset : len;
2018-08-04 16:05:08 +03:00
for_each_group_evsel(evsel, leader) {
2018-08-04 16:05:10 +03:00
struct hists *hists = evsel__hists(evsel);
2018-08-04 16:05:06 +03:00
struct annotation_data *data;
2018-08-04 16:05:07 +03:00
struct sym_hist *sym_hist;
2017-10-11 18:01:40 +03:00
2018-08-04 16:05:08 +03:00
BUG_ON(i >= al->data_nr);
2021-07-06 18:16:59 +03:00
sym_hist = annotation__histogram(notes, evsel->core.idx);
2018-08-04 16:05:08 +03:00
data = &al->data[i++];
2017-10-11 18:01:40 +03:00
2018-08-04 16:05:10 +03:00
calc_percent(sym_hist, hists, data, al->offset, end);
2017-10-11 18:01:40 +03:00
}
}
}
2019-07-21 14:23:51 +03:00
void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
2017-10-11 18:01:40 +03:00
{
struct annotation *notes = symbol__annotation(sym);
2017-11-15 14:05:59 +03:00
annotation__calc_percent(notes, evsel, symbol__size(sym));
2017-10-11 18:01:40 +03:00
}
2020-02-04 07:52:28 +03:00
int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
2019-11-04 17:10:00 +03:00
struct annotation_options *options, struct arch **parch)
2017-10-11 18:01:28 +03:00
{
2019-11-04 17:10:00 +03:00
struct symbol *sym = ms->sym;
perf annotate: Fix getting source line failure
The output of "perf annotate -l --stdio xxx" changed since commit 425859ff0de33
("perf annotate: No need to calculate notes->start twice") removed notes->start
assignment in symbol__calc_lines(). It then fails in
find_address_in_section(), called from the symbol__tty_annotate() path, as
a2l->addr is wrong, so the annotate summary doesn't report the source code
line numbers correctly.
Before fix:
liwei@euler:~/main_code/hulk_work/hulk/tools/perf$ cat common_while_1.c
void hotspot_1(void)
{
volatile int i;
for (i = 0; i < 0x10000000; i++);
for (i = 0; i < 0x10000000; i++);
for (i = 0; i < 0x10000000; i++);
}
int main(void)
{
hotspot_1();
return 0;
}
liwei@euler:~/main_code/hulk_work/hulk/tools/perf$ gcc common_while_1.c -g -o common_while_1
liwei@euler:~/main_code/hulk_work/hulk/tools/perf$ sudo ./perf record ./common_while_1
[ perf record: Woken up 2 times to write data ]
[ perf record: Captured and wrote 0.488 MB perf.data (12498 samples) ]
liwei@euler:~/main_code/hulk_work/hulk/tools/perf$ sudo ./perf annotate -l -s hotspot_1 --stdio
Sorted summary for file /home/liwei/main_code/hulk_work/hulk/tools/perf/common_while_1
----------------------------------------------
19.30 common_while_1[32]
19.03 common_while_1[4e]
19.01 common_while_1[16]
5.04 common_while_1[13]
4.99 common_while_1[4b]
4.78 common_while_1[2c]
4.77 common_while_1[10]
4.66 common_while_1[2f]
4.59 common_while_1[51]
4.59 common_while_1[35]
4.52 common_while_1[19]
4.20 common_while_1[56]
0.51 common_while_1[48]
Percent | Source code & Disassembly of common_while_1 for cycles:ppp (12480 samples, percent: local period)
-----------------------------------------------------------------------------------------------------------------
:
:
:
: Disassembly of section .text:
:
: 00000000000005fa <hotspot_1>:
: hotspot_1():
: void hotspot_1(void)
: {
0.00 : 5fa: push %rbp
0.00 : 5fb: mov %rsp,%rbp
: volatile int i;
:
: for (i = 0; i < 0x10000000; i++);
0.00 : 5fe: movl $0x0,-0x4(%rbp)
0.00 : 605: jmp 610 <hotspot_1+0x16>
0.00 : 607: mov -0x4(%rbp),%eax
common_while_1[10] 4.77 : 60a: add $0x1,%eax
common_while_1[13] 5.04 : 60d: mov %eax,-0x4(%rbp)
common_while_1[16] 19.01 : 610: mov -0x4(%rbp),%eax
common_while_1[19] 4.52 : 613: cmp $0xfffffff,%eax
0.00 : 618: jle 607 <hotspot_1+0xd>
: for (i = 0; i < 0x10000000; i++);
...
After fix:
liwei@euler:~/main_code/hulk_work/hulk/tools/perf$ sudo ./perf record ./common_while_1
[ perf record: Woken up 2 times to write data ]
[ perf record: Captured and wrote 0.488 MB perf.data (12500 samples) ]
liwei@euler:~/main_code/hulk_work/hulk/tools/perf$ sudo ./perf annotate -l -s hotspot_1 --stdio
Sorted summary for file /home/liwei/main_code/hulk_work/hulk/tools/perf/common_while_1
----------------------------------------------
33.34 common_while_1.c:5
33.34 common_while_1.c:6
33.32 common_while_1.c:7
Percent | Source code & Disassembly of common_while_1 for cycles:ppp (12482 samples, percent: local period)
-----------------------------------------------------------------------------------------------------------------
:
:
:
: Disassembly of section .text:
:
: 00000000000005fa <hotspot_1>:
: hotspot_1():
: void hotspot_1(void)
: {
0.00 : 5fa: push %rbp
0.00 : 5fb: mov %rsp,%rbp
: volatile int i;
:
: for (i = 0; i < 0x10000000; i++);
0.00 : 5fe: movl $0x0,-0x4(%rbp)
0.00 : 605: jmp 610 <hotspot_1+0x16>
0.00 : 607: mov -0x4(%rbp),%eax
common_while_1.c:5 4.70 : 60a: add $0x1,%eax
4.89 : 60d: mov %eax,-0x4(%rbp)
common_while_1.c:5 19.03 : 610: mov -0x4(%rbp),%eax
common_while_1.c:5 4.72 : 613: cmp $0xfffffff,%eax
0.00 : 618: jle 607 <hotspot_1+0xd>
: for (i = 0; i < 0x10000000; i++);
0.00 : 61a: movl $0x0,-0x4(%rbp)
0.00 : 621: jmp 62c <hotspot_1+0x32>
0.00 : 623: mov -0x4(%rbp),%eax
common_while_1.c:6 4.54 : 626: add $0x1,%eax
4.73 : 629: mov %eax,-0x4(%rbp)
common_while_1.c:6 19.54 : 62c: mov -0x4(%rbp),%eax
common_while_1.c:6 4.54 : 62f: cmp $0xfffffff,%eax
...
Signed-off-by: Wei Li <liwei391@huawei.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Fixes: 425859ff0de33 ("perf annotate: No need to calculate notes->start twice")
Link: http://lkml.kernel.org/r/20190221095716.39529-1-liwei391@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-02-21 12:57:16 +03:00
struct annotation *notes = symbol__annotation(sym);
2017-10-11 18:01:29 +03:00
struct annotate_args args = {
2017-10-11 18:01:33 +03:00
.evsel = evsel,
2018-05-28 17:27:40 +03:00
.options = options,
2017-10-11 18:01:29 +03:00
};
2020-05-04 19:44:03 +03:00
struct perf_env *env = evsel__env(evsel);
2017-12-11 18:52:17 +03:00
const char *arch_name = perf_env__arch(env);
2017-10-11 18:01:28 +03:00
struct arch *arch;
int err;
if (!arch_name)
2019-09-30 21:06:01 +03:00
return errno;
2017-10-11 18:01:28 +03:00
2017-10-11 18:01:30 +03:00
args.arch = arch = arch__find(arch_name);
2021-07-26 15:38:54 +03:00
if (arch == NULL) {
pr_err("%s: unsupported arch %s\n", __func__, arch_name);
2019-09-30 21:11:47 +03:00
return ENOTSUP;
2021-07-26 15:38:54 +03:00
}
2017-10-11 18:01:28 +03:00
if (parch)
*parch = arch;
if (arch->init) {
2017-12-11 18:46:11 +03:00
err = arch->init(arch, env ? env->cpuid : NULL);
2017-10-11 18:01:28 +03:00
if (err) {
pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
return err;
}
}
2019-11-04 17:10:00 +03:00
args.ms = *ms;
notes->start = map__rip_2objdump(ms->map, sym->start);
2018-03-20 22:19:08 +03:00
2017-11-15 14:20:08 +03:00
return symbol__disassemble(sym, &args);
2017-10-11 18:01:28 +03:00
}
2018-08-04 16:05:14 +03:00
static void insert_source_line(struct rb_root *root, struct annotation_line *al,
struct annotation_options *opts)
2011-02-04 14:45:46 +03:00
{
2017-10-11 18:01:41 +03:00
struct annotation_line *iter;
2011-02-04 14:45:46 +03:00
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
2013-03-05 09:53:28 +04:00
int i, ret;
2011-02-04 14:45:46 +03:00
while (*p != NULL) {
parent = *p;
2017-10-11 18:01:41 +03:00
iter = rb_entry(parent, struct annotation_line, rb_node);
2011-02-04 14:45:46 +03:00
2017-10-11 18:01:41 +03:00
ret = strcmp(iter->path, al->path);
2012-11-09 09:58:49 +04:00
if (ret == 0) {
2018-08-04 16:05:09 +03:00
for (i = 0; i < al->data_nr; i++) {
iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
2018-08-04 16:05:14 +03:00
opts->percent_type);
2018-08-04 16:05:09 +03:00
}
2012-11-09 09:58:49 +04:00
return;
}
if (ret < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
2018-08-04 16:05:09 +03:00
for (i = 0; i < al->data_nr; i++) {
al->data[i].percent_sum = annotation_data__percent(&al->data[i],
2018-08-04 16:05:14 +03:00
opts->percent_type);
2018-08-04 16:05:09 +03:00
}
2012-11-09 09:58:49 +04:00
2017-10-11 18:01:41 +03:00
rb_link_node(&al->rb_node, parent, p);
rb_insert_color(&al->rb_node, root);
2012-11-09 09:58:49 +04:00
}
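The walk above is the kernel's standard rbtree insertion idiom: keep a
pointer to the link being followed so the new node can be hooked in without
special-casing the root, and fold duplicates (same source path) into the
existing node. A standalone sketch of the same walk over a plain, unbalanced
binary tree (the kernel additionally calls rb_link_node()/rb_insert_color()
to rebalance; names below are illustrative):

#include <stdio.h>
#include <string.h>

struct node {
	const char *path;
	double percent_sum;
	struct node *left, *right;
};

static void insert(struct node **root, struct node *nd)
{
	struct node **p = root;

	while (*p != NULL) {
		int ret = strcmp((*p)->path, nd->path);

		if (ret == 0) {		/* same source path: merge percents */
			(*p)->percent_sum += nd->percent_sum;
			return;
		}
		p = ret < 0 ? &(*p)->left : &(*p)->right;
	}
	*p = nd;			/* hook the new leaf into the tree */
}

int main(void)
{
	struct node a = { .path = "util/annotate.c", .percent_sum = 1.5 };
	struct node b = { .path = "util/annotate.c", .percent_sum = 2.5 };
	struct node *root = NULL;

	insert(&root, &a);
	insert(&root, &b);	/* merges into the existing node */
	printf("%s: %.1f\n", root->path, root->percent_sum);	/* 4.0 */
	return 0;
}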
2017-10-11 18:01:41 +03:00
static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
2013-03-05 09:53:28 +04:00
{
int i;
2018-08-04 16:05:05 +03:00
for (i = 0; i < a->data_nr; i++) {
if (a->data[i].percent_sum == b->data[i].percent_sum)
2013-03-05 09:53:28 +04:00
continue;
2018-08-04 16:05:05 +03:00
return a->data[i].percent_sum > b->data[i].percent_sum;
2013-03-05 09:53:28 +04:00
}
return 0;
}
2017-10-11 18:01:41 +03:00
static void __resort_source_line(struct rb_root *root, struct annotation_line *al)
2012-11-09 09:58:49 +04:00
{
2017-10-11 18:01:41 +03:00
struct annotation_line *iter;
2012-11-09 09:58:49 +04:00
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
while (*p != NULL) {
parent = *p;
2017-10-11 18:01:41 +03:00
iter = rb_entry(parent, struct annotation_line, rb_node);
2012-11-09 09:58:49 +04:00
2017-10-11 18:01:41 +03:00
if (cmp_source_line(al, iter))
2011-02-04 14:45:46 +03:00
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
2017-10-11 18:01:41 +03:00
rb_link_node(&al->rb_node, parent, p);
rb_insert_color(&al->rb_node, root);
2011-02-04 14:45:46 +03:00
}
2012-11-09 09:58:49 +04:00
static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
{
2017-10-11 18:01:41 +03:00
struct annotation_line *al;
2012-11-09 09:58:49 +04:00
struct rb_node *node;
node = rb_first(src_root);
while (node) {
struct rb_node *next;
2017-10-11 18:01:41 +03:00
al = rb_entry(node, struct annotation_line, rb_node);
2012-11-09 09:58:49 +04:00
next = rb_next(node);
rb_erase(node, src_root);
2017-10-11 18:01:41 +03:00
__resort_source_line(dest_root, al);
2012-11-09 09:58:49 +04:00
node = next;
}
}
2011-02-04 14:45:46 +03:00
static void print_summary(struct rb_root *root, const char *filename)
{
2017-10-11 18:01:41 +03:00
struct annotation_line *al;
2011-02-04 14:45:46 +03:00
struct rb_node *node;
printf("\nSorted summary for file %s\n", filename);
printf("----------------------------------------------\n\n");
if (RB_EMPTY_ROOT(root)) {
printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
return;
}
node = rb_first(root);
while (node) {
2013-03-05 09:53:28 +04:00
double percent, percent_max = 0.0;
2011-02-04 14:45:46 +03:00
const char *color;
char *path;
2013-03-05 09:53:28 +04:00
int i;
2011-02-04 14:45:46 +03:00
2017-10-11 18:01:41 +03:00
al = rb_entry(node, struct annotation_line, rb_node);
2018-08-04 16:05:05 +03:00
for (i = 0; i < al->data_nr; i++) {
percent = al->data[i].percent_sum;
2013-03-05 09:53:28 +04:00
color = get_percent_color(percent);
color_fprintf(stdout, color, " %7.2f", percent);
if (percent > percent_max)
percent_max = percent;
}
2017-10-11 18:01:41 +03:00
path = al->path;
2013-03-05 09:53:28 +04:00
color = get_percent_color(percent_max);
2013-09-11 09:09:28 +04:00
color_fprintf(stdout, color, " %s\n", path);
2011-02-04 14:45:46 +03:00
node = rb_next(node);
}
}
2019-07-21 14:23:51 +03:00
static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
2011-02-04 14:45:46 +03:00
{
struct annotation *notes = symbol__annotation(sym);
2021-07-06 18:16:59 +03:00
struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
2012-04-19 17:57:06 +04:00
u64 len = symbol__size(sym), offset;
2011-02-04 14:45:46 +03:00
for (offset = 0; offset < len; ++offset)
2017-07-20 00:36:45 +03:00
if (h->addr[offset].nr_samples != 0)
2011-02-04 14:45:46 +03:00
printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
2017-07-20 00:36:45 +03:00
sym->start + offset, h->addr[offset].nr_samples);
2017-07-20 00:36:51 +03:00
printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
2011-02-04 14:45:46 +03:00
}
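A standalone sketch of the verbose dump above: each address prints
right-aligned in a column half the machine word wide (BITS_PER_LONG / 2 hex
digits). BITS_PER_LONG below is derived portably; perf gets it from its own
headers:

#include <stdio.h>
#include <inttypes.h>
#include <stdint.h>
#include <limits.h>

#define BITS_PER_LONG ((int)(CHAR_BIT * sizeof(long)))

int main(void)
{
	uint64_t addr = 0x48a971, nr_samples = 42;

	printf("%*" PRIx64 ": %" PRIu64 "\n",
	       BITS_PER_LONG / 2, addr, nr_samples);
	return 0;
}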
perf annotate: Align source and offset lines
Align source lines with offset lines, which are shifted further right
because of the address column.
Before:
: static void *worker_thread(void *__tdata)
: {
0.00 : 48a971: push %rbp
0.00 : 48a972: mov %rsp,%rbp
0.00 : 48a975: sub $0x30,%rsp
0.00 : 48a979: mov %rdi,-0x28(%rbp)
0.00 : 48a97d: mov %fs:0x28,%rax
0.00 : 48a986: mov %rax,-0x8(%rbp)
0.00 : 48a98a: xor %eax,%eax
: struct thread_data *td = __tdata;
0.00 : 48a98c: mov -0x28(%rbp),%rax
0.00 : 48a990: mov %rax,-0x10(%rbp)
: int m = 0, i;
0.00 : 48a994: movl $0x0,-0x1c(%rbp)
: int ret;
:
: for (i = 0; i < loops; i++) {
0.00 : 48a99b: movl $0x0,-0x18(%rbp)
After:
: static void *worker_thread(void *__tdata)
: {
0.00 : 48a971: push %rbp
0.00 : 48a972: mov %rsp,%rbp
0.00 : 48a975: sub $0x30,%rsp
0.00 : 48a979: mov %rdi,-0x28(%rbp)
0.00 : 48a97d: mov %fs:0x28,%rax
0.00 : 48a986: mov %rax,-0x8(%rbp)
0.00 : 48a98a: xor %eax,%eax
: struct thread_data *td = __tdata;
0.00 : 48a98c: mov -0x28(%rbp),%rax
0.00 : 48a990: mov %rax,-0x10(%rbp)
: int m = 0, i;
0.00 : 48a994: movl $0x0,-0x1c(%rbp)
: int ret;
:
: for (i = 0; i < loops; i++) {
0.00 : 48a99b: movl $0x0,-0x18(%rbp)
It makes a bigger difference when displaying script sources, where the
comment lines look oddly shifted from the lines which actually hold
code. I'll send script support separately.
Committer note:
Do not use a fixed column width for the addresses, as kernel ones use
more than 10 columns; look at the last offset and get the right width.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20171011150158.11895-36-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2017-10-11 18:01:58 +03:00
static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
{
char bf[32];
struct annotation_line *line;
list_for_each_entry_reverse(line, lines, node) {
if (line->offset != -1)
return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
}
return 0;
}
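The trick above: format the highest address that will be printed and use the
length of the result as the column width. A standalone sketch, with
snprintf(3) standing in for the kernel-style scnprintf():

#include <stdio.h>
#include <inttypes.h>
#include <stdint.h>

int main(void)
{
	char bf[32];
	uint64_t start = 0xffffffff81000000ULL, last_offset = 0x4a;
	int width = snprintf(bf, sizeof(bf), "%" PRIx64, start + last_offset);

	/* every address now right-aligns in the same column */
	printf("%*" PRIx64 ":\n", width, start);
	printf("%*" PRIx64 ":\n", width, start + last_offset);
	return 0;
}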
2019-11-04 17:10:00 +03:00
int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel,
2018-05-25 23:28:37 +03:00
struct annotation_options *opts)
2011-02-04 14:45:46 +03:00
{
2019-11-04 17:10:00 +03:00
struct map *map = ms->map;
struct symbol *sym = ms->sym;
2011-02-04 14:45:46 +03:00
struct dso *dso = map->dso;
2012-09-08 19:06:50 +04:00
char *filename;
const char *d_filename;
2020-04-29 22:07:09 +03:00
const char *evsel_name = evsel__name(evsel);
2011-02-08 18:27:39 +03:00
struct annotation *notes = symbol__annotation(sym);
2021-07-06 18:16:59 +03:00
struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
2017-10-11 18:01:46 +03:00
struct annotation_line *pos, *queue = NULL;
2012-04-02 19:59:01 +04:00
u64 start = map__rip_2objdump(map, sym->start);
2017-10-11 18:01:58 +03:00
int printed = 2, queue_len = 0, addr_fmt_width;
2011-02-06 19:54:44 +03:00
int more = 0;
2018-05-25 23:28:37 +03:00
bool context = opts->context;
2011-02-04 14:45:46 +03:00
u64 len;
2017-07-26 23:16:46 +03:00
int width = symbol_conf.show_total_period ? 12 : 8;
2016-06-30 15:17:26 +03:00
int graph_dotted_len;
2018-05-22 14:38:35 +03:00
char buf[512];
2011-02-04 14:45:46 +03:00
2012-09-08 19:06:50 +04:00
filename = strdup(dso->long_name);
if (!filename)
return -ENOMEM;
2018-05-25 23:28:37 +03:00
if (opts->full_path)
2011-02-04 14:45:46 +03:00
d_filename = filename;
else
d_filename = basename(filename);
2012-04-19 17:57:06 +04:00
len = symbol__size(sym);
perf annotate: Add basic support to event group view
Add --group option to enable event grouping. When enabled, all the
group members information will be shown with the leader so skip
non-leader events.
It only supports --stdio output currently. Later patches will add
additional features.
$ perf annotate --group --stdio
...
Percent | Source code & Disassembly of libpthread-2.15.so
--------------------------------------------------------------------------------
:
:
:
: Disassembly of section .text:
:
: 000000387dc0aa50 <__pthread_mutex_unlock_usercnt>:
8.08 2.40 5.29 : 387dc0aa50: mov %rdi,%rdx
0.00 0.00 0.00 : 387dc0aa53: mov 0x10(%rdi),%edi
0.00 0.00 0.00 : 387dc0aa56: mov %edi,%eax
0.00 0.80 0.00 : 387dc0aa58: and $0x7f,%eax
3.03 2.40 3.53 : 387dc0aa5b: test $0x7c,%dil
0.00 0.00 0.00 : 387dc0aa5f: jne 387dc0aaa9 <__pthread_mutex_unlock_use
0.00 0.00 0.00 : 387dc0aa61: test %eax,%eax
0.00 0.00 0.00 : 387dc0aa63: jne 387dc0aa85 <__pthread_mutex_unlock_use
0.00 0.00 0.00 : 387dc0aa65: and $0x80,%edi
0.00 0.00 0.00 : 387dc0aa6b: test %esi,%esi
3.03 5.60 7.06 : 387dc0aa6d: movl $0x0,0x8(%rdx)
0.00 0.00 0.59 : 387dc0aa74: je 387dc0aa7a <__pthread_mutex_unlock_use
0.00 0.00 0.00 : 387dc0aa76: subl $0x1,0xc(%rdx)
2.02 5.60 1.18 : 387dc0aa7a: mov %edi,%esi
0.00 0.00 0.00 : 387dc0aa7c: lock decl (%rdx)
83.84 83.20 82.35 : 387dc0aa7f: jne 387dc0aada <_L_unlock_586>
0.00 0.00 0.00 : 387dc0aa81: nop
0.00 0.00 0.00 : 387dc0aa82: xor %eax,%eax
0.00 0.00 0.00 : 387dc0aa84: retq
...
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1362462812-30885-6-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-03-05 09:53:25 +04:00
2020-04-30 16:51:16 +03:00
if (evsel__is_group_event(evsel)) {
2019-07-21 14:24:46 +03:00
width *= evsel->core.nr_members;
2020-04-29 22:09:12 +03:00
evsel__group_desc(evsel, buf, sizeof(buf));
2018-05-22 14:38:35 +03:00
evsel_name = buf;
}
2011-02-04 14:45:46 +03:00
2018-08-04 16:05:19 +03:00
graph_dotted_len = printf("%-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples, "
"percent: %s)\n",
2017-08-18 11:46:48 +03:00
width, width, symbol_conf.show_total_period ? "Period" :
symbol_conf.show_nr_samples ? "Samples" : "Percent",
2018-08-04 16:05:19 +03:00
d_filename, evsel_name, h->nr_samples,
percent_type_str(opts->percent_type));
2014-03-18 18:50:21 +04:00
2016-06-30 15:17:26 +03:00
printf("%-*.*s----\n",
2014-03-18 18:50:21 +04:00
graph_dotted_len, graph_dotted_len, graph_dotted_line);
2011-02-04 14:45:46 +03:00
2017-02-17 11:17:38 +03:00
if (verbose > 0)
2013-03-05 09:53:21 +04:00
symbol__annotate_hits(sym, evsel);
2011-02-04 14:45:46 +03:00
2017-10-11 18:01:58 +03:00
addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source, start);
2017-10-11 18:01:46 +03:00
list_for_each_entry(pos, &notes->src->source, node) {
int err;
2011-02-08 20:29:25 +03:00
if (context && queue == NULL) {
queue = pos;
queue_len = 0;
}
2017-10-11 18:01:46 +03:00
err = annotation_line__print(pos, sym, start, evsel, len,
2018-05-25 23:28:37 +03:00
opts->min_pcnt, printed, opts->max_lines,
2018-08-04 16:05:13 +03:00
queue, addr_fmt_width, opts->percent_type);
2017-10-11 18:01:46 +03:00
switch (err) {
2011-02-06 19:54:44 +03:00
case 0:
++printed;
2011-02-08 20:29:25 +03:00
if (context) {
printed += queue_len;
queue = NULL;
queue_len = 0;
}
2011-02-06 19:54:44 +03:00
break;
case 1:
/* filtered by max_lines */
++more;
2011-02-05 20:37:31 +03:00
break;
2011-02-06 19:54:44 +03:00
case -1:
default:
2011-02-08 20:29:25 +03:00
/*
* Filtered by min_pcnt or non IP lines when
* context != 0
*/
if (!context)
break;
if (queue_len == context)
2017-10-11 18:01:46 +03:00
queue = list_entry(queue->node.next, typeof(*queue), node);
2011-02-08 20:29:25 +03:00
else
++queue_len;
2011-02-06 19:54:44 +03:00
break;
}
}
2012-09-08 19:06:50 +04:00
free(filename);
2011-02-06 19:54:44 +03:00
return more;
}
2011-02-05 23:51:38 +03:00
2018-03-16 05:44:34 +03:00
static void FILE__set_percent_color(void *fp __maybe_unused,
double percent __maybe_unused,
bool current __maybe_unused)
{
}
static int FILE__set_jumps_percent_color(void *fp __maybe_unused,
int nr __maybe_unused, bool current __maybe_unused)
{
return 0;
}
static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused)
{
return 0;
}
static void FILE__printf(void *fp, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
vfprintf(fp, fmt, args);
va_end(args);
}
static void FILE__write_graph(void *fp, int graph)
{
const char *s;
switch (graph) {
case DARROW_CHAR: s = "↓"; break;
case UARROW_CHAR: s = "↑"; break;
case LARROW_CHAR: s = "←"; break;
case RARROW_CHAR: s = "→"; break;
default: s = "?"; break;
}
fputs(s, fp);
}
2018-08-04 16:05:15 +03:00
static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp,
struct annotation_options *opts)
2018-03-16 05:44:34 +03:00
{
struct annotation *notes = symbol__annotation(sym);
2018-08-04 16:05:15 +03:00
struct annotation_write_ops wops = {
2018-03-16 05:44:34 +03:00
.first_line = true,
.obj = fp,
.set_color = FILE__set_color,
.set_percent_color = FILE__set_percent_color,
.set_jumps_percent_color = FILE__set_jumps_percent_color,
.printf = FILE__printf,
.write_graph = FILE__write_graph,
};
struct annotation_line *al;
list_for_each_entry(al, &notes->src->source, node) {
if (annotation_line__filter(al, notes))
continue;
2018-08-04 16:05:15 +03:00
annotation_line__write(al, notes, &wops, opts);
2018-03-16 05:44:34 +03:00
fputc('\n', fp);
2018-08-04 16:05:15 +03:00
wops.first_line = false;
2018-03-16 05:44:34 +03:00
}
return 0;
}
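The FILE__* stubs above form a small output vtable: the same annotation
writer can drive either the TUI or a plain FILE, with no-op color hooks for
the FILE case. A stripped-down standalone sketch of that pattern (the struct
and function names below are illustrative, not perf's):

#include <stdio.h>
#include <stdarg.h>

struct write_ops {
	void *obj;	/* backend state, e.g. a FILE * */
	void (*printf)(void *obj, const char *fmt, ...);
};

static void FILE_printf(void *obj, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vfprintf(obj, fmt, args);
	va_end(args);
}

int main(void)
{
	struct write_ops wops = { .obj = stdout, .printf = FILE_printf };

	/* the writer only ever sees the ops table, never the FILE directly */
	wops.printf(wops.obj, "hello from the %s backend\n", "FILE");
	return 0;
}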
2019-07-21 14:23:51 +03:00
int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel,
2018-08-04 16:05:15 +03:00
struct annotation_options *opts)
perf annotate browser: Add 'P' hotkey to dump annotation to file
Just like we have in the histograms browser used as the main screen for
'perf top --tui' and 'perf report --tui', to print the current
annotation to a file with a name composed of the symbol name and the
".annotation" suffix.
Here is one example of pressing 'A' on 'perf top' to live annotate a
kernel function and then press 'P' to dump that annotation, the
resulting file:
# cat _raw_spin_lock_irqsave.annotation
_raw_spin_lock_irqsave() /proc/kcore
Event: cycles:ppp
7.14 nop
21.43 push %rbx
7.14 pushfq
pop %rax
nop
mov %rax,%rbx
cli
nop
xor %eax,%eax
mov $0x1,%edx
64.29 lock cmpxchg %edx,(%rdi)
test %eax,%eax
↓ jne 2b
mov %rbx,%rax
pop %rbx
← retq
2b: mov %eax,%esi
→ callq queued_spin_lock_slowpath
mov %rbx,%rax
pop %rbx
← retq
#
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: https://lkml.kernel.org/n/tip-zzmnrwugb5vtk7bvg0rbx150@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-03-16 22:57:47 +03:00
{
2020-04-29 22:07:09 +03:00
const char *ev_name = evsel__name(evsel);
2018-03-16 22:57:47 +03:00
char buf[1024];
char *filename;
int err = -1;
FILE *fp;
if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0)
return -1;
fp = fopen(filename, "w");
if (fp == NULL)
goto out_free_filename;
2020-04-30 16:51:16 +03:00
if (evsel__is_group_event(evsel)) {
2020-04-29 22:09:12 +03:00
evsel__group_desc(evsel, buf, sizeof(buf));
2018-03-16 22:57:47 +03:00
ev_name = buf;
}
fprintf(fp, "%s() %s\nEvent: %s\n\n",
ms->sym->name, ms->map->dso->long_name, ev_name);
2018-08-04 16:05:15 +03:00
symbol__annotate_fprintf2(ms->sym, fp, opts);
2018-03-16 22:57:47 +03:00
fclose(fp);
err = 0;
out_free_filename:
free(filename);
return err;
}
2011-02-06 19:54:44 +03:00
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
{
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx);
2011-02-08 18:27:39 +03:00
memset(h, 0, notes->src->sizeof_sym_hist);
2011-02-06 19:54:44 +03:00
}
2011-02-08 18:27:39 +03:00
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
2011-02-06 19:54:44 +03:00
{
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx);
2012-04-19 17:57:06 +04:00
int len = symbol__size(sym), offset;
2011-02-06 19:54:44 +03:00
2017-07-20 00:36:51 +03:00
h->nr_samples = 0;
2012-04-05 23:15:59 +04:00
for (offset = 0; offset < len; ++offset) {
2017-07-20 00:36:45 +03:00
h->addr[offset].nr_samples = h->addr[offset].nr_samples * 7 / 8;
2017-07-20 00:36:51 +03:00
h->nr_samples += h->addr[offset].nr_samples;
2011-02-05 23:51:38 +03:00
}
}
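The 7/8 multiply above is an exponential decay: each time 'perf top'
refreshes, a sample's remaining weight shrinks to (7/8)^k after k decays, so
stale hits fade from the display. A tiny sketch of how fast that happens:

#include <stdio.h>

int main(void)
{
	unsigned long nr_samples = 1000;

	for (int k = 1; k <= 8; k++) {
		nr_samples = nr_samples * 7 / 8;	/* same integer decay */
		printf("after %d decays: %lu\n", k, nr_samples);
	}
	return 0;	/* roughly a third of the weight is left after 8 rounds */
}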
2017-10-11 18:01:38 +03:00
void annotated_source__purge(struct annotated_source *as)
2011-02-05 23:51:38 +03:00
{
2017-10-11 18:01:38 +03:00
struct annotation_line *al, *n;
2011-02-05 23:51:38 +03:00
2017-10-11 18:01:38 +03:00
list_for_each_entry_safe(al, n, &as->source, node) {
2019-07-04 18:13:46 +03:00
list_del_init(&al->node);
2017-10-11 18:01:38 +03:00
disasm_line__free(disasm_line(al));
2011-02-05 23:51:38 +03:00
}
}
2012-04-15 22:52:18 +04:00
static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
{
size_t printed;
2017-10-11 18:01:26 +03:00
if (dl->al.offset == -1)
return fprintf(fp, "%s\n", dl->al.line);
2012-04-15 22:52:18 +04:00
2017-10-11 18:01:26 +03:00
printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);
2012-04-15 22:52:18 +04:00
2012-04-20 21:38:46 +04:00
if (dl->ops.raw[0] != '\0') {
2012-04-15 22:52:18 +04:00
printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
2012-04-20 21:38:46 +04:00
dl->ops.raw);
2012-04-15 22:52:18 +04:00
}
return printed + fprintf(fp, "\n");
}
size_t disasm__fprintf(struct list_head *head, FILE *fp)
{
struct disasm_line *pos;
size_t printed = 0;
2017-10-11 18:01:25 +03:00
list_for_each_entry(pos, head, al.node)
2012-04-15 22:52:18 +04:00
printed += disasm_line__fprintf(pos, fp);
return printed;
}
perf annotate: Add "_local" to jump/offset validation routines
Because they all really check if we can access data structures/visual
constructs where a "jump" instruction targets code in the same function,
i.e. things like:
__pthread_mutex_lock /usr/lib64/libpthread-2.26.so
1.95 │ mov __pthread_force_elision,%ecx
│ ┌──test %ecx,%ecx
0.07 │ ├──je 60
│ │ test $0x300,%esi
│ │↓ jne 60
│ │ or $0x100,%esi
│ │ mov %esi,0x10(%rdi)
│ 42:│ mov %esi,%edx
│ │ lea 0x16(%r8),%rsi
│ │ mov %r8,%rdi
│ │ and $0x80,%edx
│ │ add $0x8,%rsp
│ │→ jmpq __lll_lock_elision
│ │ nop
0.29 │ 60:└─→and $0x80,%esi
0.07 │ mov $0x1,%edi
0.29 │ xor %eax,%eax
2.53 │ lock cmpxchg %edi,(%r8)
And not things like that "jmpq __lll_lock_elision", which instead should behave
like a "call" instruction and "jump" to the disassembly of "__lll_lock_elision".
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: https://lkml.kernel.org/n/tip-3cwx39u3h66dfw9xjrlt7ca2@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-03-23 16:12:33 +03:00
bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym)
2018-03-15 21:31:56 +03:00
{
if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) ||
2018-03-23 16:12:33 +03:00
!disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 ||
2018-03-15 21:31:56 +03:00
dl->ops.target.offset >= (s64)symbol__size(sym))
return false;
return true;
}
void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
{
u64 offset, size = symbol__size(sym);
/* PLT symbols contain external offsets */
if (strstr(sym->name, "@plt"))
return;
for (offset = 0; offset < size; ++offset) {
struct annotation_line *al = notes->offsets[offset];
struct disasm_line *dl;
dl = disasm_line(al);
2018-03-23 16:12:33 +03:00
if (!disasm_line__is_valid_local_jump(dl, sym))
2018-03-15 21:31:56 +03:00
continue;
al = notes->offsets[dl->ops.target.offset];
/*
* FIXME: Oops, no jump target? Buggy disassembler? Or do we
* have to adjust to the previous offset?
*/
if (al == NULL)
continue;
if (++al->jump_sources > notes->max_jump_sources)
notes->max_jump_sources = al->jump_sources;
}
}
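The bookkeeping above, stripped down to plain arrays: every valid local jump
bumps a counter at its target offset, and the running maximum later sizes
the jump-sources column. The offsets below are made up:

#include <stdio.h>

int main(void)
{
	int jump_sources[16] = { 0 };	/* one counter per byte offset */
	int targets[] = { 4, 4, 8, 4 };	/* target offsets of local jumps */
	int max_jump_sources = 0;

	for (unsigned long i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
		if (++jump_sources[targets[i]] > max_jump_sources)
			max_jump_sources = jump_sources[targets[i]];
	}
	printf("max jump sources: %d\n", max_jump_sources);	/* 3 */
	return 0;
}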
2018-03-15 21:59:01 +03:00
void annotation__set_offsets(struct annotation *notes, s64 size)
{
struct annotation_line *al;
notes->max_line_len = 0;
2020-02-04 07:52:31 +03:00
notes->nr_entries = 0;
notes->nr_asm_entries = 0;
2018-03-15 21:59:01 +03:00
list_for_each_entry(al, &notes->src->source, node) {
size_t line_len = strlen(al->line);
if (notes->max_line_len < line_len)
notes->max_line_len = line_len;
al->idx = notes->nr_entries++;
if (al->offset != -1) {
al->idx_asm = notes->nr_asm_entries++;
/*
* FIXME: short term bandaid to cope with assembly
* routines that come with labels in the same column
* as the address in objdump, sigh.
*
* E.g. copy_user_generic_unrolled
*/
if (al->offset < size)
notes->offsets[al->offset] = al;
} else
al->idx_asm = -1;
}
}
2018-03-15 22:26:29 +03:00
static inline int width_jumps(int n)
{
if (n >= 100)
return 5;
if (n / 10)
return 2;
return 1;
}
perf annotate: Calculate the max instruction name, align column to that
We were hardcoding '6' as the max instruction name, and we have lots
that are longer than that, see the diff from two 'P' printed TUI
annotations for a libc function that uses instructions with long names,
such as 'vpmovmskb' with its 9 chars:
--- __strcmp_avx2.annotation.before 2019-03-06 16:31:39.368020425 -0300
+++ __strcmp_avx2.annotation 2019-03-06 16:32:12.079450508 -0300
@@ -2,284 +2,284 @@
Event: cycles:ppp
Percent endbr64
- 0.10 mov %edi,%eax
+ 0.10 mov %edi,%eax
- xor %edx,%edx
+ xor %edx,%edx
- 3.54 vpxor %ymm7,%ymm7,%ymm7
+ 3.54 vpxor %ymm7,%ymm7,%ymm7
- or %esi,%eax
+ or %esi,%eax
- and $0xfff,%eax
+ and $0xfff,%eax
- cmp $0xf80,%eax
+ cmp $0xf80,%eax
- ↓ jg 370
+ ↓ jg 370
- 27.07 vmovdqu (%rdi),%ymm1
+ 27.07 vmovdqu (%rdi),%ymm1
- 7.97 vpcmpeqb (%rsi),%ymm1,%ymm0
+ 7.97 vpcmpeqb (%rsi),%ymm1,%ymm0
- 2.15 vpminub %ymm1,%ymm0,%ymm0
+ 2.15 vpminub %ymm1,%ymm0,%ymm0
- 4.09 vpcmpeqb %ymm7,%ymm0,%ymm0
+ 4.09 vpcmpeqb %ymm7,%ymm0,%ymm0
- 0.43 vpmovmskb %ymm0,%ecx
+ 0.43 vpmovmskb %ymm0,%ecx
- 1.53 test %ecx,%ecx
+ 1.53 test %ecx,%ecx
- ↓ je b0
+ ↓ je b0
- 5.26 tzcnt %ecx,%edx
+ 5.26 tzcnt %ecx,%edx
- 18.40 movzbl (%rdi,%rdx,1),%eax
+ 18.40 movzbl (%rdi,%rdx,1),%eax
- 7.09 movzbl (%rsi,%rdx,1),%edx
+ 7.09 movzbl (%rsi,%rdx,1),%edx
- 3.34 sub %edx,%eax
+ 3.34 sub %edx,%eax
2.37 vzeroupper
← retq
nop
- 50: tzcnt %ecx,%edx
+ 50: tzcnt %ecx,%edx
- movzbl 0x20(%rdi,%rdx,1),%eax
+ movzbl 0x20(%rdi,%rdx,1),%eax
- movzbl 0x20(%rsi,%rdx,1),%edx
+ movzbl 0x20(%rsi,%rdx,1),%edx
- sub %edx,%eax
+ sub %edx,%eax
vzeroupper
← retq
- data16 nopw %cs:0x0(%rax,%rax,1)
+ data16 nopw %cs:0x0(%rax,%rax,1)
Reported-by: Travis Downs <travis.downs@gmail.com>
LPU-Reference: CAOBGo4z1KfmWeOm6Et0cnX5Z6DWsG2PQbAvRn1MhVPJmXHrc5g@mail.gmail.com
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lkml.kernel.org/n/tip-89wsdd9h9g6bvq52sgp6d0u4@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-03-06 22:40:15 +03:00
static int annotation__max_ins_name(struct annotation *notes)
{
int max_name = 0, len;
struct annotation_line *al;
list_for_each_entry(al, &notes->src->source, node) {
if (al->offset == -1)
continue;
len = strlen(disasm_line(al)->ins.name);
if (max_name < len)
max_name = len;
}
return max_name;
}
2018-03-15 22:26:29 +03:00
void annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
{
notes->widths.addr = notes->widths.target =
notes->widths.min_addr = hex_width(symbol__size(sym));
notes->widths.max_addr = hex_width(sym->end);
notes->widths.jumps = width_jumps(notes->max_jump_sources);
2019-03-06 22:40:15 +03:00
notes->widths.max_ins_name = annotation__max_ins_name(notes);
2018-03-15 22:26:29 +03:00
}
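hex_width() (pulled in from util.h, per the include note at the top of this
file) is the number of hex digits needed to print a value; a portable sketch
consistent with its use here:

#include <stdio.h>
#include <stdint.h>

static int hex_width(uint64_t v)
{
	int n = 1;

	while (v >>= 4)		/* one digit per 4 bits above the first */
		n++;
	return n;
}

int main(void)
{
	/* 1, 2 and 4 columns respectively */
	printf("%d %d %d\n", hex_width(0xf), hex_width(0x10), hex_width(0x1234));
	return 0;
}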
2018-03-15 22:19:59 +03:00
void annotation__update_column_widths(struct annotation *notes)
{
if (notes->options->use_offset)
notes->widths.target = notes->widths.min_addr;
else
notes->widths.target = notes->widths.max_addr;
notes->widths.addr = notes->widths.target;
if (notes->options->show_nr_jumps)
notes->widths.addr += notes->widths.jumps + 1;
}
2017-10-11 18:01:41 +03:00
static void annotation__calc_lines(struct annotation *notes, struct map *map,
2018-08-04 16:05:14 +03:00
struct rb_root *root,
struct annotation_options *opts)
2017-10-11 18:01:41 +03:00
{
struct annotation_line *al;
struct rb_root tmp_root = RB_ROOT;
list_for_each_entry(al, &notes->src->source, node) {
double percent_max = 0.0;
int i;
2018-08-04 16:05:05 +03:00
for (i = 0; i < al->data_nr; i++) {
2018-08-04 16:05:09 +03:00
double percent;
2017-10-11 18:01:41 +03:00
2018-08-04 16:05:09 +03:00
percent = annotation_data__percent(&al->data[i],
2018-08-04 16:05:14 +03:00
opts->percent_type);
2017-10-11 18:01:41 +03:00
2018-08-04 16:05:09 +03:00
if (percent > percent_max)
percent_max = percent;
2017-10-11 18:01:41 +03:00
}
if (percent_max <= 0.5)
continue;
2018-03-20 17:03:30 +03:00
al->path = get_srcline(map->dso, notes->start + al->offset, NULL,
false, true, notes->start + al->offset);
2018-08-04 16:05:14 +03:00
insert_source_line(&tmp_root, al, opts);
2017-10-11 18:01:41 +03:00
}
resort_source_line(root, &tmp_root);
}
2019-11-04 17:10:00 +03:00
static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root,
2018-08-04 16:05:14 +03:00
			       struct annotation_options *opts)
2017-10-11 18:01:41 +03:00
{
2019-11-04 17:10:00 +03:00
	struct annotation *notes = symbol__annotation(ms->sym);
2017-10-11 18:01:41 +03:00
2019-11-04 17:10:00 +03:00
	annotation__calc_lines(notes, ms->map, root, opts);
2017-10-11 18:01:41 +03:00
}
2019-11-04 17:10:00 +03:00
int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel,
2018-05-25 23:28:37 +03:00
			  struct annotation_options *opts)
2018-03-16 05:44:34 +03:00
{
2019-11-04 17:10:00 +03:00
	struct dso *dso = ms->map->dso;
	struct symbol *sym = ms->sym;
2018-03-16 05:44:34 +03:00
	struct rb_root source_line = RB_ROOT;
2018-08-04 16:05:04 +03:00
	struct hists *hists = evsel__hists(evsel);
2018-03-16 21:17:23 +03:00
	char buf[1024];
2021-07-29 18:58:02 +03:00
	int err;

	err = symbol__annotate2(ms, evsel, opts, NULL);
	if (err) {
		char msg[BUFSIZ];
2018-03-16 05:44:34 +03:00
2021-07-29 18:58:02 +03:00
		dso->annotate_warned = true;
		symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
		ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
2018-03-16 05:44:34 +03:00
		return -1;
2021-07-29 18:58:02 +03:00
	}
2018-03-16 05:44:34 +03:00
2018-05-25 23:28:37 +03:00
	if (opts->print_lines) {
		srcline_full_filename = opts->full_path;
2019-11-04 17:10:00 +03:00
		symbol__calc_lines(ms, &source_line, opts);
2018-03-16 05:44:34 +03:00
		print_summary(&source_line, dso->long_name);
	}
2018-08-04 16:05:04 +03:00
	hists__scnprintf_title(hists, buf, sizeof(buf));
2018-08-04 16:05:19 +03:00
	fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
		buf, percent_type_str(opts->percent_type), sym->name, dso->long_name);
2018-08-04 16:05:15 +03:00
	symbol__annotate_fprintf2(sym, stdout, opts);
2018-03-16 05:44:34 +03:00
	annotated_source__purge(symbol__annotation(sym)->src);

	return 0;
}
2019-11-04 17:10:00 +03:00
int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel,
2018-05-25 23:28:37 +03:00
			 struct annotation_options *opts)
2011-02-05 23:51:38 +03:00
{
2019-11-04 17:10:00 +03:00
	struct dso *dso = ms->map->dso;
	struct symbol *sym = ms->sym;
2011-02-05 23:51:38 +03:00
	struct rb_root source_line = RB_ROOT;
2021-07-29 18:58:02 +03:00
	int err;

	err = symbol__annotate(ms, evsel, opts, NULL);
	if (err) {
		char msg[BUFSIZ];
2011-02-05 23:51:38 +03:00
2021-07-29 18:58:02 +03:00
		dso->annotate_warned = true;
		symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
		ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
2011-02-05 23:51:38 +03:00
		return -1;
2021-07-29 18:58:02 +03:00
	}
2011-02-05 23:51:38 +03:00
2017-11-15 14:20:08 +03:00
	symbol__calc_percent(sym, evsel);
2018-05-25 23:28:37 +03:00
	if (opts->print_lines) {
		srcline_full_filename = opts->full_path;
2019-11-04 17:10:00 +03:00
		symbol__calc_lines(ms, &source_line, opts);
2013-09-11 09:09:30 +04:00
		print_summary(&source_line, dso->long_name);
2011-02-04 14:45:46 +03:00
	}
2019-11-04 17:10:00 +03:00
	symbol__annotate_printf(ms, evsel, opts);
2011-02-04 14:45:46 +03:00
2017-10-11 18:01:38 +03:00
	annotated_source__purge(symbol__annotation(sym)->src);
2011-02-05 23:51:38 +03:00
2011-02-04 14:45:46 +03:00
	return 0;
}
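Note: of the two TTY entry points above, 'perf annotate --stdio2' lands in symbol__tty_annotate2(), which reuses the TUI line renderer for stdout (as the commit messages below describe), while plain '--stdio' takes the older symbol__tty_annotate() path.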
2013-12-19 00:10:15 +04:00
2014-02-20 05:32:53 +04:00
bool ui__has_annotation(void)
{
2016-05-03 14:54:44 +03:00
	return use_browser == 1 && perf_hpp_list.sym;
2014-02-20 05:32:53 +04:00
}
2018-03-15 22:54:11 +03:00
2018-03-15 23:04:53 +03:00
2018-08-04 16:05:03 +03:00
static double annotation_line__max_percent(struct annotation_line *al,
2018-08-04 16:05:15 +03:00
					   struct annotation *notes,
					   unsigned int percent_type)
2018-03-15 23:04:53 +03:00
{
	double percent_max = 0.0;
	int i;

	for (i = 0; i < notes->nr_events; i++) {
2018-08-04 16:05:09 +03:00
		double percent;

		percent = annotation_data__percent(&al->data[i],
2018-08-04 16:05:15 +03:00
						   percent_type);
2018-08-04 16:05:09 +03:00
		if (percent > percent_max)
			percent_max = percent;
2018-03-15 23:04:53 +03:00
	}

	return percent_max;
}
2018-03-16 01:12:39 +03:00
static void disasm_line__write(struct disasm_line *dl, struct annotation *notes,
			       void *obj, char *bf, size_t size,
			       void (*obj__printf)(void *obj, const char *fmt, ...),
			       void (*obj__write_graph)(void *obj, int graph))
{
	if (dl->ins.ops && dl->ins.ops->scnprintf) {
		if (ins__is_jump(&dl->ins)) {
2018-03-20 23:20:43 +03:00
			bool fwd;
2018-03-16 01:12:39 +03:00
2018-03-20 23:20:43 +03:00
			/* Jumps leaving the function are drawn like calls. */
			if (dl->ops.target.outside)
				goto call_like;
			fwd = dl->ops.target.offset > dl->al.offset;
2018-03-16 01:12:39 +03:00
			obj__write_graph(obj, fwd ? DARROW_CHAR : UARROW_CHAR);
			obj__printf(obj, " ");
		} else if (ins__is_call(&dl->ins)) {
2018-03-20 23:20:43 +03:00
call_like:
2018-03-16 01:12:39 +03:00
			obj__write_graph(obj, RARROW_CHAR);
			obj__printf(obj, " ");
		} else if (ins__is_ret(&dl->ins)) {
			obj__write_graph(obj, LARROW_CHAR);
			obj__printf(obj, " ");
		} else {
			obj__printf(obj, "  ");
		}
	} else {
		obj__printf(obj, "  ");
	}
2019-03-06 22:40:15 +03:00
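	/* Align the operands column to the widest mnemonic seen in this symbol. */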
	disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset, notes->widths.max_ins_name);
2018-03-16 01:12:39 +03:00
}
perf annotate: Compute average IPC and IPC coverage per symbol
Add support to 'perf report' annotate view or 'perf annotate --stdio2'
to aggregate the IPC derived from timed LBRs per symbol. We compute the
average IPC and the IPC coverage percentage.
For example:
$ perf annotate --stdio2
Percent IPC Cycle (Average IPC: 2.30, IPC Coverage: 54.8%)
Disassembly of section .text:
000000000003aac0 <random@@GLIBC_2.2.5>:
8.32 3.28 sub $0x18,%rsp
3.28 mov $0x1,%esi
3.28 xor %eax,%eax
3.28 cmpl $0x0,argp_program_version_hook@@GLIBC_2.2.5+0x1e0
11.57 3.28 1 ↓ je 20
lock cmpxchg %esi,__abort_msg@@GLIBC_PRIVATE+0x8a0
↓ jne 29
↓ jmp 43
11.57 1.10 20: cmpxchg %esi,__abort_msg@@GLIBC_PRIVATE+0x8a0
0.00 1.10 1 ↓ je 43
29: lea __abort_msg@@GLIBC_PRIVATE+0x8a0,%rdi
sub $0x80,%rsp
→ callq __lll_lock_wait_private
add $0x80,%rsp
0.00 3.00 43: lea __ctype_b@GLIBC_2.2.5+0x38,%rdi
3.00 lea 0xc(%rsp),%rsi
8.49 3.00 1 → callq __random_r
7.91 1.94 cmpl $0x0,argp_program_version_hook@@GLIBC_2.2.5+0x1e0
0.00 1.94 1 ↓ je 68
lock decl __abort_msg@@GLIBC_PRIVATE+0x8a0
↓ jne 70
↓ jmp 8a
0.00 2.00 68: decl __abort_msg@@GLIBC_PRIVATE+0x8a0
21.56 2.00 1 ↓ je 8a
70: lea __abort_msg@@GLIBC_PRIVATE+0x8a0,%rdi
sub $0x80,%rsp
→ callq __lll_unlock_wake_private
add $0x80,%rsp
21.56 2.90 8a: movslq 0xc(%rsp),%rax
2.90 add $0x18,%rsp
9.03 2.90 1 ← retq
It shows that, for this symbol, the average IPC is 2.30 and the IPC
coverage is 54.8%.
Signed-off-by: Jin Yao <yao.jin@linux.intel.com>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1543586097-27632-2-git-send-email-yao.jin@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-11-30 16:54:54 +03:00
static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
{
	double ipc = 0.0, coverage = 0.0;

	if (notes->hit_cycles)
		ipc = notes->hit_insn / ((double)notes->hit_cycles);

	if (notes->total_insn) {
		coverage = notes->cover_insn * 100.0 /
			   ((double)notes->total_insn);
	}

	scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
		  ipc, coverage);
}
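The same arithmetic as a standalone, runnable sketch; the counter values are made up for illustration and do not come from a real profile:

#include <stdio.h>

int main(void)
{
	/* Illustrative counters: 230 instructions retired over 100 cycles
	 * sampled via timed LBRs, with 60 of 80 instructions covered. */
	unsigned long long hit_insn = 230, hit_cycles = 100;
	unsigned long long cover_insn = 60, total_insn = 80;
	double ipc = 0.0, coverage = 0.0;

	if (hit_cycles)
		ipc = hit_insn / (double)hit_cycles;                /* 2.30 */
	if (total_insn)
		coverage = cover_insn * 100.0 / (double)total_insn; /* 75.0% */

	printf("(Average IPC: %.2f, IPC Coverage: %.1f%%)\n", ipc, coverage);
	return 0;
}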
2018-03-16 05:14:51 +03:00
static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
				     bool first_line, bool current_entry, bool change_color, int width,
2018-08-04 16:05:15 +03:00
				     void *obj, unsigned int percent_type,
2018-03-16 05:14:51 +03:00
				     int (*obj__set_color)(void *obj, int color),
				     void (*obj__set_percent_color)(void *obj, double percent, bool current),
				     int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current),
				     void (*obj__printf)(void *obj, const char *fmt, ...),
				     void (*obj__write_graph)(void *obj, int graph))
2018-03-15 23:54:36 +03:00
{
2018-08-04 16:05:15 +03:00
	double percent_max = annotation_line__max_percent(al, notes, percent_type);
2018-03-16 01:12:39 +03:00
	int pcnt_width = annotation__pcnt_width(notes),
	    cycles_width = annotation__cycles_width(notes);
2018-03-15 23:54:36 +03:00
	bool show_title = false;
2018-03-16 01:12:39 +03:00
	char bf[256];
	int printed;
2018-03-15 23:54:36 +03:00
	if (first_line && (al->offset == -1 || percent_max == 0.0)) {
		if (notes->have_cycles) {
			if (al->ipc == 0.0 && al->cycles == 0)
				show_title = true;
		} else
			show_title = true;
	}

	if (al->offset != -1 && percent_max != 0.0) {
		int i;

		for (i = 0; i < notes->nr_events; i++) {
2018-08-04 16:05:09 +03:00
			double percent;
2018-08-04 16:05:15 +03:00
			percent = annotation_data__percent(&al->data[i], percent_type);
2018-08-04 16:05:09 +03:00
			obj__set_percent_color(obj, percent, current_entry);
2020-02-13 09:43:00 +03:00
			if (symbol_conf.show_total_period) {
2018-08-04 16:05:05 +03:00
				obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
2020-02-13 09:43:01 +03:00
			} else if (symbol_conf.show_nr_samples) {
2018-03-15 23:54:36 +03:00
				obj__printf(obj, "%6" PRIu64 " ",
2018-08-04 16:05:05 +03:00
					    al->data[i].he.nr_samples);
2018-03-15 23:54:36 +03:00
			} else {
2018-08-04 16:05:09 +03:00
				obj__printf(obj, "%6.2f ", percent);
2018-03-15 23:54:36 +03:00
			}
		}
	} else {
		obj__set_percent_color(obj, 0, current_entry);

		if (!show_title)
2018-03-16 01:12:39 +03:00
			obj__printf(obj, "%-*s", pcnt_width, " ");
2018-03-15 23:54:36 +03:00
		else {
2018-03-16 01:12:39 +03:00
			obj__printf(obj, "%-*s", pcnt_width,
2020-02-13 09:43:00 +03:00
				    symbol_conf.show_total_period ? "Period" :
2020-02-13 09:43:01 +03:00
				    symbol_conf.show_nr_samples ? "Samples" : "Percent");
2018-03-15 23:54:36 +03:00
		}
	}

	if (notes->have_cycles) {
		if (al->ipc)
			obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->ipc);
		else if (!show_title)
			obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
		else
			obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");
perf annotate: Create hotkey 'c' to show min/max cycles
In the 'perf annotate' view, a new hotkey 'c' is created for showing the
min/max cycles.
For example, when pressing 'c', the annotate view is:
Percent│ IPC Cycle(min/max)
│
│
│ Disassembly of section .text:
│
│ 000000000003aab0 <random@@GLIBC_2.2.5>:
8.22 │3.92 sub $0x18,%rsp
│3.92 mov $0x1,%esi
│3.92 xor %eax,%eax
│3.92 cmpl $0x0,argp_program_version_hook@@G
│3.92 1(2/1) ↓ je 20
│ lock cmpxchg %esi,__abort_msg@@GLIBC_P
│ ↓ jne 29
│ ↓ jmp 43
│1.10 20: cmpxchg %esi,__abort_msg@@GLIBC_PRIVATE+
8.93 │1.10 1(5/1) ↓ je 43
When pressing 'c' again, the annotate view switches back:
Percent│ IPC Cycle
│
│
│ Disassembly of section .text:
│
│ 000000000003aab0 <random@@GLIBC_2.2.5>:
8.22 │3.92 sub $0x18,%rsp
│3.92 mov $0x1,%esi
│3.92 xor %eax,%eax
│3.92 cmpl $0x0,argp_program_version_hook@@GLIBC_2.2.5+0x
│3.92 1 ↓ je 20
│ lock cmpxchg %esi,__abort_msg@@GLIBC_PRIVATE+0x8a0
│ ↓ jne 29
│ ↓ jmp 43
│1.10 20: cmpxchg %esi,__abort_msg@@GLIBC_PRIVATE+0x8a0
8.93 │1.10 1 ↓ je 43
Signed-off-by: Jin Yao <yao.jin@linux.intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1526569118-14217-3-git-send-email-yao.jin@linux.intel.com
[ Rename all maxmin to minmax ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-05-17 17:58:38 +03:00
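		/* The 'c' hotkey described above toggles show_minmax_cycle,
		 * switching between the plain "Cycle" column and the wider
		 * "Cycle(min/max)" one. */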
		if (!notes->options->show_minmax_cycle) {
			if (al->cycles)
				obj__printf(obj, "%*" PRIu64 " ",
2018-03-15 23:54:36 +03:00
					    ANNOTATION__CYCLES_WIDTH - 1, al->cycles);
2018-05-17 17:58:38 +03:00
			else if (!show_title)
				obj__printf(obj, "%*s",
					    ANNOTATION__CYCLES_WIDTH, " ");
			else
				obj__printf(obj, "%*s ",
					    ANNOTATION__CYCLES_WIDTH - 1,
					    "Cycle");
		} else {
			if (al->cycles) {
				char str[32];

				scnprintf(str, sizeof(str),
					  "%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
					  al->cycles, al->cycles_min,
					  al->cycles_max);

				obj__printf(obj, "%*s ",
					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
					    str);
			} else if (!show_title)
				obj__printf(obj, "%*s",
					    ANNOTATION__MINMAX_CYCLES_WIDTH,
					    " ");
			else
				obj__printf(obj, "%*s ",
					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
					    "Cycle(min/max)");
		}
2018-11-30 16:54:54 +03:00
		if (show_title && !*al->line) {
			ipc_coverage_string(bf, sizeof(bf), notes);
			obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
		}
2018-03-15 23:54:36 +03:00
	}

	obj__printf(obj, " ");
2018-03-16 01:12:39 +03:00
	if (!*al->line)
		obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
	else if (al->offset == -1) {
		if (al->line_nr && notes->options->show_linenr)
			printed = scnprintf(bf, sizeof(bf), "%-*d ", notes->widths.addr + 1, al->line_nr);
		else
			printed = scnprintf(bf, sizeof(bf), "%-*s  ", notes->widths.addr, " ");
		obj__printf(obj, bf);
		obj__printf(obj, "%-*s", width - printed - pcnt_width - cycles_width + 1, al->line);
	} else {
		u64 addr = al->offset;
		int color = -1;

		if (!notes->options->use_offset)
			addr += notes->start;

		if (!notes->options->use_offset) {
			printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
		} else {
2018-04-11 16:30:03 +03:00
			if (al->jump_sources &&
			    notes->options->offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
2018-03-16 01:12:39 +03:00
				if (notes->options->show_nr_jumps) {
					int prev;

					printed = scnprintf(bf, sizeof(bf), "%*d ",
							    notes->widths.jumps,
							    al->jump_sources);
					prev = obj__set_jumps_percent_color(obj, al->jump_sources,
									    current_entry);
					obj__printf(obj, bf);
					obj__set_color(obj, prev);
				}
2018-04-11 16:30:03 +03:00
print_addr:
2018-03-16 01:12:39 +03:00
				printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
						    notes->widths.target, addr);
2018-04-11 16:30:03 +03:00
			} else if (ins__is_call(&disasm_line(al)->ins) &&
				   notes->options->offset_level >= ANNOTATION__OFFSET_CALL) {
				goto print_addr;
			} else if (notes->options->offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
				goto print_addr;
2018-03-16 01:12:39 +03:00
			} else {
				printed = scnprintf(bf, sizeof(bf), "%-*s  ",
						    notes->widths.addr, " ");
			}
		}

		if (change_color)
			color = obj__set_color(obj, HE_COLORSET_ADDR);
		obj__printf(obj, bf);
		if (change_color)
			obj__set_color(obj, color);

		disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf), obj__printf, obj__write_graph);

		obj__printf(obj, "%-*s", width - pcnt_width - cycles_width - 3 - printed, bf);
	}
2018-03-15 23:54:36 +03:00
}
2018-03-16 05:14:51 +03:00
void annotation_line__write(struct annotation_line *al, struct annotation *notes,
2018-08-04 16:05:15 +03:00
			    struct annotation_write_ops *wops,
			    struct annotation_options *opts)
2018-03-16 05:14:51 +03:00
{
2018-08-04 16:05:15 +03:00
	__annotation_line__write(al, notes, wops->first_line, wops->current_entry,
				 wops->change_color, wops->width, wops->obj,
				 opts->percent_type,
				 wops->set_color, wops->set_percent_color,
				 wops->set_jumps_percent_color, wops->printf,
				 wops->write_graph);
2018-03-16 05:14:51 +03:00
}
2019-11-04 17:10:00 +03:00
int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
2018-03-15 22:54:11 +03:00
		      struct annotation_options *options, struct arch **parch)
{
2019-11-04 17:10:00 +03:00
	struct symbol *sym = ms->sym;
2018-03-15 22:54:11 +03:00
	struct annotation *notes = symbol__annotation(sym);
	size_t size = symbol__size(sym);
	int nr_pcnt = 1, err;

	notes->offsets = zalloc(size * sizeof(struct annotation_line *));
	if (notes->offsets == NULL)
2019-09-30 21:53:33 +03:00
		return ENOMEM;
2018-03-15 22:54:11 +03:00
2020-04-30 16:51:16 +03:00
	if (evsel__is_group_event(evsel))
2019-07-21 14:24:46 +03:00
		nr_pcnt = evsel->core.nr_members;
2018-03-15 22:54:11 +03:00
2020-02-04 07:52:28 +03:00
	err = symbol__annotate(ms, evsel, options, parch);
2018-03-15 22:54:11 +03:00
	if (err)
		goto out_free_offsets;

	notes->options = options;

	symbol__calc_percent(sym, evsel);

	annotation__set_offsets(notes, size);
	annotation__mark_jump_targets(notes, sym);
	annotation__compute_ipc(notes, size);
	annotation__init_column_widths(notes, sym);
	notes->nr_events = nr_pcnt;

	annotation__update_column_widths(notes);
2021-11-12 06:51:23 +03:00
	sym->annotate2 = 1;
2018-03-15 22:54:11 +03:00
	return 0;

out_free_offsets:
	zfree(&notes->offsets);
2019-09-30 21:44:13 +03:00
	return err;
2018-03-15 22:54:11 +03:00
}
2018-03-16 20:33:38 +03:00
perf annotate: Make perf config effective
The perf default config set by the user in the [annotate] section is
completely ignored by the annotate code. Fix it.
Before:
$ ./perf config
annotate.hide_src_code=true
annotate.show_nr_jumps=true
annotate.show_nr_samples=true
$ ./perf annotate shash
│ unsigned h = 0;
│ movl $0x0,-0xc(%rbp)
│ while (*s)
│ ↓ jmp 44
│ h = 65599 * h + *s++;
11.33 │24: mov -0xc(%rbp),%eax
43.50 │ imul $0x1003f,%eax,%ecx
│ mov -0x18(%rbp),%rax
After:
│ movl $0x0,-0xc(%rbp)
│ ↓ jmp 44
1 │1 24: mov -0xc(%rbp),%eax
4 │ imul $0x1003f,%eax,%ecx
│ mov -0x18(%rbp),%rax
Note that we have removed show_nr_samples and show_total_period from
annotation_options because they were unused; we use
symbol_conf.show_nr_samples and symbol_conf.show_total_period instead.
Committer testing:
Using 'perf annotate --stdio2' to use the TUI rendering but emitting the output to stdio:
# perf config
#
# perf config annotate.hide_src_code=true
# perf config
annotate.hide_src_code=true
#
# perf config annotate.show_nr_jumps=true
# perf config annotate.show_nr_samples=true
# perf config
annotate.hide_src_code=true
annotate.show_nr_jumps=true
annotate.show_nr_samples=true
#
#
Before:
# perf annotate --stdio2 ObjectInstance::weak_pointer_was_finalized
Samples: 1 of event 'cycles', 4000 Hz, Event count (approx.): 830873, [percent: local period]
ObjectInstance::weak_pointer_was_finalized() /usr/lib64/libgjs.so.0.0.0
Percent
00000000000609f0 <ObjectInstance::weak_pointer_was_finalized()@@Base>:
endbr64
cmpq $0x0,0x20(%rdi)
↓ je 10
xor %eax,%eax
← retq
xchg %ax,%ax
100.00 10: push %rbp
cmpq $0x0,0x18(%rdi)
mov %rdi,%rbp
↓ jne 20
1b: xor %eax,%eax
pop %rbp
← retq
nop
20: lea 0x18(%rdi),%rdi
→ callq JS_UpdateWeakPointerAfterGC(JS::Heap<JSObject*
cmpq $0x0,0x18(%rbp)
↑ jne 1b
mov %rbp,%rdi
→ callq ObjectBase::jsobj_addr() const@plt
mov $0x1,%eax
pop %rbp
← retq
#
After:
# perf annotate --stdio2 ObjectInstance::weak_pointer_was_finalized 2> /dev/null
Samples: 1 of event 'cycles', 4000 Hz, Event count (approx.): 830873, [percent: local period]
ObjectInstance::weak_pointer_was_finalized() /usr/lib64/libgjs.so.0.0.0
Samples endbr64
cmpq $0x0,0x20(%rdi)
↓ je 10
xor %eax,%eax
← retq
xchg %ax,%ax
1 1 10: push %rbp
cmpq $0x0,0x18(%rdi)
mov %rdi,%rbp
↓ jne 20
1 1b: xor %eax,%eax
pop %rbp
← retq
nop
1 20: lea 0x18(%rdi),%rdi
→ callq JS_UpdateWeakPointerAfterGC(JS::Heap<JSObject*
cmpq $0x0,0x18(%rbp)
↑ jne 1b
mov %rbp,%rdi
→ callq ObjectBase::jsobj_addr() const@plt
mov $0x1,%eax
pop %rbp
← retq
#
# perf config annotate.show_nr_jumps
annotate.show_nr_jumps=true
# perf config annotate.show_nr_jumps=false
# perf config annotate.show_nr_jumps
annotate.show_nr_jumps=false
#
# perf annotate --stdio2 ObjectInstance::weak_pointer_was_finalized 2> /dev/null
Samples: 1 of event 'cycles', 4000 Hz, Event count (approx.): 830873, [percent: local period]
ObjectInstance::weak_pointer_was_finalized() /usr/lib64/libgjs.so.0.0.0
Samples endbr64
cmpq $0x0,0x20(%rdi)
↓ je 10
xor %eax,%eax
← retq
xchg %ax,%ax
1 10: push %rbp
cmpq $0x0,0x18(%rdi)
mov %rdi,%rbp
↓ jne 20
1b: xor %eax,%eax
pop %rbp
← retq
nop
20: lea 0x18(%rdi),%rdi
→ callq JS_UpdateWeakPointerAfterGC(JS::Heap<JSObject*
cmpq $0x0,0x18(%rbp)
↑ jne 1b
mov %rbp,%rdi
→ callq ObjectBase::jsobj_addr() const@plt
mov $0x1,%eax
pop %rbp
← retq
#
Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Changbin Du <changbin.du@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Taeung Song <treeze.taeung@gmail.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Yisheng Xie <xieyisheng1@huawei.com>
Link: http://lore.kernel.org/lkml/20200213064306.160480-6-ravi.bangoria@linux.ibm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2020-02-13 09:43:03 +03:00
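The keys exercised above can be made persistent in ~/.perfconfig; an illustrative [annotate] section (the values here are examples, not defaults):

[annotate]
	hide_src_code = true
	show_nr_jumps = true
	show_nr_samples = true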
static int annotation__config(const char *var, const char *value, void *data)
2018-03-16 20:33:38 +03:00
{
2020-02-13 09:43:03 +03:00
	struct annotation_options *opt = data;
2018-03-16 20:33:38 +03:00
	if (!strstarts(var, "annotate."))
		return 0;
2020-02-13 09:43:03 +03:00
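	/* Note: out-of-range offset_level values are clamped to the nearest
	 * valid bound rather than rejected. */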
	if (!strcmp(var, "annotate.offset_level")) {
		perf_config_u8(&opt->offset_level, "offset_level", value);

		if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
			opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
		else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
			opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
	} else if (!strcmp(var, "annotate.hide_src_code")) {
		opt->hide_src_code = perf_config_bool("hide_src_code", value);
	} else if (!strcmp(var, "annotate.jump_arrows")) {
		opt->jump_arrows = perf_config_bool("jump_arrows", value);
	} else if (!strcmp(var, "annotate.show_linenr")) {
		opt->show_linenr = perf_config_bool("show_linenr", value);
	} else if (!strcmp(var, "annotate.show_nr_jumps")) {
		opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
	} else if (!strcmp(var, "annotate.show_nr_samples")) {
		symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
							       value);
	} else if (!strcmp(var, "annotate.show_total_period")) {
		symbol_conf.show_total_period = perf_config_bool("show_total_period",
								 value);
	} else if (!strcmp(var, "annotate.use_offset")) {
		opt->use_offset = perf_config_bool("use_offset", value);
perf annotate: Allow configuring the 'disassembler_style' knob via 'perf config'
# perf annotate --stdio2 acpi_processor_ffh_cstate_enter > default
# perf config annotate.disassembler_style=intel
# perf config annotate.disassembler_style
annotate.disassembler_style=intel
# perf annotate --stdio2 acpi_processor_ffh_cstate_enter > intel
# diff -u default intel
--- default 2020-09-04 13:09:26.019205732 -0300
+++ intel 2020-09-04 13:09:52.823795081 -0300
@@ -1,42 +1,42 @@
Samples: 1K of event 'cycles', 4000 Hz, Event count (approx.): 990065316, [percent: local period]
acpi_processor_ffh_cstate_enter() /lib/modules/5.9.0-rc3/build/vmlinux
-Percent → callq __fentry__
- mov cpu_number,%edx
- mov %edx,%edx
- mov cpu_cstate_entry,%rax
- add -0x7dbe9700(,%rdx,8),%rax
- movzbl 0x9(%rdi),%edx
- mov 0x4(%rax,%rdx,8),%edi
- mov (%rax,%rdx,8),%esi
- → jmpq 137ccc6
- 2d: → jmpq 137ccd8
+Percent → call __fentry__
+ mov edx,DWORD PTR gs:[rip+0x7e541d74]
+ mov edx,edx
+ mov rax,QWORD PTR [rip+0x152b8fb]
+ add rax,QWORD PTR [rdx*8-0x7dbe9700]
+ movzx edx,BYTE PTR [rdi+0x9]
+ mov edi,DWORD PTR [rax+rdx*8+0x4]
+ mov esi,DWORD PTR [rax+rdx*8]
+ → jmp 137ccc6
+ 2d: → jmp 137ccd8
mfence
- mov %gs:0x17bc0,%rax
- clflush (%rax)
+ mov rax,QWORD PTR gs:0x17bc0
+ clflush BYTE PTR [rax]
mfence
- xor %edx,%edx
- mov %rdx,%rcx
- mov %gs:0x17bc0,%rax
- 0.00 monitor %rax,%ecx,%edx
- mov (%rax),%rax
- test $0x8,%al
+ xor edx,edx
+ mov rcx,rdx
+ mov rax,QWORD PTR gs:0x17bc0
+ 0.00 monitor
+ mov rax,QWORD PTR [rax]
+ test al,0x8
↓ jne 71
- ↓ jmpq 68
- verw 0x538b08(%rip) # ffffffff82008150 <ds.0>
- 68: mov %rsi,%rax
- mov %rdi,%rcx
-100.00 mwait %eax,%ecx
- 71: mov %gs:0x17bc0,%rax
- lock andb $0xdf,0x2(%rax)
- lock addl $0x0,-0x4(%rsp)
- mov (%rax),%rax
- test $0x8,%al
+ ↓ jmp 68
+ verw WORD PTR [rip+0x538b08] # ffffffff82008150 <ds.0>
+ 68: mov rax,rsi
+ mov rcx,rdi
+100.00 mwait
+ 71: mov rax,QWORD PTR gs:0x17bc0
+ lock and BYTE PTR [rax+0x2],0xdf
+ lock add DWORD PTR [rsp-0x4],0x0
+ mov rax,QWORD PTR [rax]
+ test al,0x8
↓ je 97
- andl $0x7fffffff,__preempt_count
- 97: ← retq
- mov %gs:0x17bc0,%rax
- lock orb $0x20,0x2(%rax)
- mov (%rax),%rax
- test $0x8,%al
+ and DWORD PTR gs:[rip+0x7e548509],0x7fffffff
+ 97: ret
+ mov rax,QWORD PTR gs:0x17bc0
+ lock or BYTE PTR [rax+0x2],0x20
+ mov rax,QWORD PTR [rax]
+ test al,0x8
↑ jne 71
- ↑ jmpq 2d
+ ↑ jmp 2d
#
Requested-by: Matt P. Dziubinski <matdzb@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2020-09-04 19:10:43 +03:00
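	/*
	 * Illustrative ~/.perfconfig equivalent of the knob handled below:
	 *   [annotate]
	 *       disassembler_style = intel
	 */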
	} else if (!strcmp(var, "annotate.disassembler_style")) {
		opt->disassembler_style = value;
2021-02-26 13:01:24 +03:00
	} else if (!strcmp(var, "annotate.demangle")) {
		symbol_conf.demangle = perf_config_bool("demangle", value);
	} else if (!strcmp(var, "annotate.demangle_kernel")) {
		symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
perf annotate: Allow setting the offset level in .perfconfig
The default is 1 (jump_target):
# perf annotate --ignore-vmlinux --stdio2 _raw_spin_lock_irqsave
Samples: 3K of event 'cycles:ppp', 3000 Hz, Event count (approx.): 2766398574
_raw_spin_lock_irqsave() /proc/kcore
0.26 nop
4.61 push %rbx
19.33 pushfq
7.97 pop %rax
0.32 nop
0.06 mov %rax,%rbx
14.63 cli
0.06 nop
xor %eax,%eax
mov $0x1,%edx
49.94 lock cmpxchg %edx,(%rdi)
0.16 test %eax,%eax
↓ jne 2b
2.66 mov %rbx,%rax
pop %rbx
← retq
2b: mov %eax,%esi
→ callq *ffffffffb30eaed0
mov %rbx,%rax
pop %rbx
← retq
#
But one can also ask for offsets to be shown for call instructions by
raising this level:
# perf annotate --ignore-vmlinux --stdio2 _raw_spin_lock_irqsave
Samples: 3K of event 'cycles:ppp', 3000 Hz, Event count (approx.): 2766398574
_raw_spin_lock_irqsave() /proc/kcore
0.26 nop
4.61 push %rbx
19.33 pushfq
7.97 pop %rax
0.32 nop
0.06 mov %rax,%rbx
14.63 cli
0.06 nop
xor %eax,%eax
mov $0x1,%edx
49.94 lock cmpxchg %edx,(%rdi)
0.16 test %eax,%eax
↓ jne 2b
2.66 mov %rbx,%rax
pop %rbx
← retq
2b: mov %eax,%esi
2d: → callq *ffffffffb30eaed0
mov %rbx,%rax
pop %rbx
← retq
#
Or using a big value to ask for all offsets to be shown:
# cat ~/.perfconfig
[annotate]
offset_level = 100
hide_src_code = true
# perf annotate --ignore-vmlinux --stdio2 _raw_spin_lock_irqsave
Samples: 3K of event 'cycles:ppp', 3000 Hz, Event count (approx.): 2766398574
_raw_spin_lock_irqsave() /proc/kcore
0.26 0: nop
4.61 5: push %rbx
19.33 6: pushfq
7.97 7: pop %rax
0.32 8: nop
0.06 d: mov %rax,%rbx
14.63 10: cli
0.06 11: nop
17: xor %eax,%eax
19: mov $0x1,%edx
49.94 1e: lock cmpxchg %edx,(%rdi)
0.16 22: test %eax,%eax
24: ↓ jne 2b
2.66 26: mov %rbx,%rax
29: pop %rbx
2a: ← retq
2b: mov %eax,%esi
2d: → callq *ffffffffb30eaed0
32: mov %rbx,%rax
35: pop %rbx
36: ← retq
#
This also affects the TUI, i.e. the default 'perf annotate' and the 'perf
top/report' -> 'A' hotkey -> annotate interfaces, when slang-devel is
present in the build:
# perf version --build-options | grep slang
libslang: [ on ] # HAVE_SLANG_SUPPORT
#
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Martin Liška <mliska@suse.cz>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
Cc: Thomas Richter <tmricht@linux.vnet.ibm.com>
Cc: Wang Nan <wangnan0@huawei.com>
Link: https://lkml.kernel.org/n/tip-venm6x5zrt40eu8hxdsmqxz6@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-04-12 21:23:02 +03:00
	} else {
2020-02-13 09:43:03 +03:00
		pr_debug("%s variable unknown, ignoring...", var);
2018-04-12 21:23:02 +03:00
}
2020-02-13 09:43:03 +03:00
2018-03-16 20:33:38 +03:00
	return 0;
}
2020-02-13 09:43:03 +03:00
void annotation_config__init(struct annotation_options *opt)
2018-03-16 20:33:38 +03:00
{
2020-02-13 09:43:03 +03:00
	perf_config(annotation__config, opt);
2018-03-16 20:33:38 +03:00
}
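A minimal usage sketch of this entry point (the local variable is
hypothetical; annotation__default_options is assumed to carry the built-in
defaults):

	struct annotation_options opts = annotation__default_options;

	/* Overlay any [annotate] keys from the user's perfconfig. */
	annotation_config__init(&opts);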
2018-08-04 16:05:20 +03:00
static unsigned int parse_percent_type(char *str1, char *str2)
{
	unsigned int type = (unsigned int) -1;

	if (!strcmp("period", str1)) {
		if (!strcmp("local", str2))
			type = PERCENT_PERIOD_LOCAL;
		else if (!strcmp("global", str2))
			type = PERCENT_PERIOD_GLOBAL;
	}

	if (!strcmp("hits", str1)) {
		if (!strcmp("local", str2))
			type = PERCENT_HITS_LOCAL;
		else if (!strcmp("global", str2))
			type = PERCENT_HITS_GLOBAL;
	}

	return type;
}
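Since the caller below retries with the two tokens swapped, either ordering
of the scope/unit pair is accepted; the resulting mapping is:

	/*
	 * "local-period"  (or "period-local")  -> PERCENT_PERIOD_LOCAL
	 * "global-period" (or "period-global") -> PERCENT_PERIOD_GLOBAL
	 * "local-hits"    (or "hits-local")    -> PERCENT_HITS_LOCAL
	 * "global-hits"   (or "hits-global")   -> PERCENT_HITS_GLOBAL
	 */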
int annotate_parse_percent_type(const struct option *opt, const char *_str,
				int unset __maybe_unused)
{
	struct annotation_options *opts = opt->value;
	unsigned int type;
	char *str1, *str2;
	int err = -1;

	str1 = strdup(_str);
	if (!str1)
		return -ENOMEM;

	str2 = strchr(str1, '-');
	if (!str2)
		goto out;

	*str2++ = 0;

	/* Accept the two tokens in either order, e.g. "local-period"
	 * as well as "period-local". */
	type = parse_percent_type(str1, str2);
	if (type == (unsigned int) -1)
		type = parse_percent_type(str2, str1);
	if (type != (unsigned int) -1) {
		opts->percent_type = type;
		err = 0;
	}

out:
	free(str1);
	return err;
}
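The function is shaped as a parse-options callback; a hedged sketch of how a
tool might wire it into its option table (option name, default and help text
here are illustrative):

	OPT_CALLBACK(0, "percent-type", &annotation_opts, "local-period",
		     "Set percentage type (local/global-period/hits)",
		     annotate_parse_percent_type),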
2020-01-08 00:04:44 +03:00
int annotate_check_args(struct annotation_options *args)
{
	if (args->prefix_strip && !args->prefix) {
		pr_err("--prefix-strip requires --prefix\n");
		return -1;
	}

	return 0;
}
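Callers would typically run this validation right after parsing the command
line; a minimal sketch (the options variable and error path are
illustrative):

	if (annotate_check_args(&annotation_opts) < 0)
		return -EINVAL; /* --prefix-strip given without --prefix */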