2019-05-29 17:18:02 +03:00
/* SPDX-License-Identifier: GPL-2.0-only */
2015-04-09 18:53:42 +03:00
/*
* auxtrace . h : AUX area trace support
* Copyright ( c ) 2013 - 2015 , Intel Corporation .
*/
# ifndef __PERF_AUXTRACE_H
# define __PERF_AUXTRACE_H
# include <sys/types.h>
2017-04-18 16:46:11 +03:00
# include <errno.h>
2015-04-09 18:53:42 +03:00
# include <stdbool.h>
2015-04-09 18:53:44 +03:00
# include <stddef.h>
2019-09-18 17:36:13 +03:00
# include <stdio.h> // FILE
2015-04-21 12:21:51 +03:00
# include <linux/list.h>
2015-04-09 18:53:42 +03:00
# include <linux/perf_event.h>
# include <linux/types.h>
2022-01-05 09:13:51 +03:00
# include <internal/cpumap.h>
2018-09-25 17:52:10 +03:00
# include <asm/bitsperlong.h>
2019-08-29 20:59:50 +03:00
# include <asm/barrier.h>
2015-04-09 18:53:42 +03:00
2015-04-09 18:53:44 +03:00
union perf_event ;
struct perf_session ;
2019-07-21 14:23:52 +03:00
struct evlist ;
2020-04-01 13:15:58 +03:00
struct evsel ;
2015-04-09 18:53:44 +03:00
struct perf_tool ;
2019-07-27 21:30:53 +03:00
struct mmap ;
2019-09-18 16:12:07 +03:00
struct perf_sample ;
2015-04-09 18:53:49 +03:00
struct option ;
2015-04-09 18:53:44 +03:00
struct record_opts ;
2019-09-18 17:36:13 +03:00
struct perf_record_auxtrace_error ;
2019-08-28 16:57:16 +03:00
struct perf_record_auxtrace_info ;
2015-04-09 18:53:50 +03:00
struct events_stats ;
2020-02-17 11:23:00 +03:00
struct perf_pmu ;
2015-04-09 18:53:42 +03:00
2019-08-30 17:44:32 +03:00
/* Types of decoder error reported via PERF_RECORD_AUXTRACE_ERROR */
enum auxtrace_error_type {
	PERF_AUXTRACE_ERROR_ITRACE  = 1,	/* instruction trace decode error */
	PERF_AUXTRACE_ERROR_MAX
};

/* Auxtrace records must have the same alignment as perf event records */
#define PERF_AUXTRACE_RECORD_ALIGNMENT 8
2015-04-09 18:53:53 +03:00
/*
 * Identifies which AUX area decoder a perf.data file's auxtrace data needs
 * (recorded in the PERF_RECORD_AUXTRACE_INFO event's type field).
 */
enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,
	PERF_AUXTRACE_INTEL_BTS,
	PERF_AUXTRACE_CS_ETM,
	PERF_AUXTRACE_ARM_SPE,
	PERF_AUXTRACE_S390_CPUMSF,
};
2015-04-09 18:53:49 +03:00
/* Units in which the 'instructions' synthesis period is expressed */
enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,
	PERF_ITRACE_PERIOD_TICKS,
	PERF_ITRACE_PERIOD_NANOSECS,
};
2020-07-10 18:10:57 +03:00
/*
 * Flags selecting which decoder errors are reported; each bit is keyed by
 * the itrace option letter that controls it ('o' = overflow, 'l' = lost).
 */
#define AUXTRACE_ERR_FLG_OVERFLOW	(1 << ('o' - 'a'))
#define AUXTRACE_ERR_FLG_DATA_LOST	(1 << ('l' - 'a'))

/* Flags affecting the decoder log ('a' = all perf events, 'o' = stdout) */
#define AUXTRACE_LOG_FLG_ALL_PERF_EVTS	(1 << ('a' - 'a'))
#define AUXTRACE_LOG_FLG_USE_STDOUT	(1 << ('o' - 'a'))
2020-07-10 18:10:59 +03:00
2015-04-09 18:53:49 +03:00
/**
* struct itrace_synth_opts - AUX area tracing synthesis options .
* @ set : indicates whether or not options have been set
2018-09-20 21:05:37 +03:00
* @ default_no_sample : Default to no sampling .
2015-04-09 18:53:49 +03:00
* @ inject : indicates the event ( not just the sample ) must be fully synthesized
* because ' perf inject ' will write it out
* @ instructions : whether to synthesize ' instructions ' events
* @ branches : whether to synthesize ' branches ' events
2020-05-30 15:24:41 +03:00
* ( branch misses only for Arm SPE )
2015-04-30 17:37:28 +03:00
* @ transactions : whether to synthesize events for transactions
2017-05-26 11:17:24 +03:00
* @ ptwrites : whether to synthesize events for ptwrites
2017-05-26 11:17:25 +03:00
* @ pwr_events : whether to synthesize power events
2019-08-06 11:46:03 +03:00
* @ other_events : whether to synthesize other events recorded due to the use of
* aux_output
2022-01-24 11:41:48 +03:00
* @ intr_events : whether to synthesize interrupt events
2015-04-09 18:53:49 +03:00
* @ errors : whether to synthesize decoder error events
* @ dont_decode : whether to skip decoding entirely
* @ log : write a decoding log
* @ calls : limit branch samples to calls ( can be combined with @ returns )
* @ returns : limit branch samples to returns ( can be combined with @ calls )
* @ callchain : add callchain to ' instructions ' events
2020-04-01 13:16:05 +03:00
* @ add_callchain : add callchain to existing event records
2016-06-23 16:40:57 +03:00
* @ thread_stack : feed branches to the thread_stack
2015-09-25 16:15:39 +03:00
* @ last_branch : add branch context to ' instruction ' events
2020-04-29 18:07:46 +03:00
* @ add_last_branch : add branch context to existing event records
2021-10-27 11:03:30 +03:00
* @ approx_ipc : approximate IPC
2020-05-30 15:24:41 +03:00
* @ flc : whether to synthesize first level cache events
* @ llc : whether to synthesize last level cache events
* @ tlb : whether to synthesize TLB events
* @ remote_access : whether to synthesize remote access events
2020-11-06 12:48:50 +03:00
* @ mem : whether to synthesize memory events
2021-04-30 10:02:58 +03:00
* @ timeless_decoding : prefer " timeless " decoding i . e . ignore timestamps
2021-04-30 10:03:02 +03:00
* @ vm_time_correlation : perform VM Time Correlation
* @ vm_tm_corr_dry_run : VM Time Correlation dry - run
* @ vm_tm_corr_args : VM Time Correlation implementation - specific arguments
2015-04-09 18:53:49 +03:00
* @ callchain_sz : maximum callchain size
2015-09-25 16:15:39 +03:00
* @ last_branch_sz : branch context size
2015-04-09 18:53:49 +03:00
* @ period : ' instructions ' events period
* @ period_type : ' instructions ' events period type
2016-03-28 20:45:38 +03:00
* @ initial_skip : skip N events at the beginning .
2017-05-26 11:17:38 +03:00
* @ cpu_bitmap : CPUs for which to synthesize events , or NULL for all
2019-06-04 15:59:59 +03:00
* @ ptime_range : time intervals to trace or NULL
* @ range_num : number of time intervals to trace
2020-07-10 18:10:57 +03:00
* @ error_plus_flags : flags to affect what errors are reported
* @ error_minus_flags : flags to affect what errors are reported
2020-07-10 18:10:59 +03:00
* @ log_plus_flags : flags to affect what is logged
* @ log_minus_flags : flags to affect what is logged
2020-07-10 18:11:02 +03:00
* @ quick : quicker ( less detailed ) decoding
2015-04-09 18:53:49 +03:00
*/
struct itrace_synth_opts {
bool set ;
2018-09-20 21:05:37 +03:00
bool default_no_sample ;
2015-04-09 18:53:49 +03:00
bool inject ;
bool instructions ;
bool branches ;
2015-04-30 17:37:28 +03:00
bool transactions ;
2017-05-26 11:17:24 +03:00
bool ptwrites ;
2017-05-26 11:17:25 +03:00
bool pwr_events ;
2019-08-06 11:46:03 +03:00
bool other_events ;
2022-01-24 11:41:48 +03:00
bool intr_events ;
2015-04-09 18:53:49 +03:00
bool errors ;
bool dont_decode ;
bool log ;
bool calls ;
bool returns ;
bool callchain ;
2020-04-01 13:16:05 +03:00
bool add_callchain ;
2016-06-23 16:40:57 +03:00
bool thread_stack ;
2015-09-25 16:15:39 +03:00
bool last_branch ;
2020-04-29 18:07:46 +03:00
bool add_last_branch ;
2021-10-27 11:03:30 +03:00
bool approx_ipc ;
2020-05-30 15:24:41 +03:00
bool flc ;
bool llc ;
bool tlb ;
bool remote_access ;
2020-11-06 12:48:50 +03:00
bool mem ;
2021-04-30 10:02:58 +03:00
bool timeless_decoding ;
2021-04-30 10:03:02 +03:00
bool vm_time_correlation ;
bool vm_tm_corr_dry_run ;
char * vm_tm_corr_args ;
2015-04-09 18:53:49 +03:00
unsigned int callchain_sz ;
2015-09-25 16:15:39 +03:00
unsigned int last_branch_sz ;
2015-04-09 18:53:49 +03:00
unsigned long long period ;
enum itrace_period_type period_type ;
2016-03-28 20:45:38 +03:00
unsigned long initial_skip ;
2017-05-26 11:17:38 +03:00
unsigned long * cpu_bitmap ;
2019-06-04 15:59:59 +03:00
struct perf_time_interval * ptime_range ;
int range_num ;
2020-07-10 18:10:57 +03:00
unsigned int error_plus_flags ;
unsigned int error_minus_flags ;
2020-07-10 18:10:59 +03:00
unsigned int log_plus_flags ;
unsigned int log_minus_flags ;
2020-07-10 18:11:02 +03:00
unsigned int quick ;
2015-04-09 18:53:49 +03:00
} ;
2015-04-30 17:37:25 +03:00
/**
 * struct auxtrace_index_entry - indexes a AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
	u64	file_offset;
	u64	sz;
};
/* Number of index entries held by one struct auxtrace_index array */
#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256

/**
 * struct auxtrace_index - index of AUX area tracing events within a perf.data
 *                         file.
 * @list: linking a number of arrays of entries
 * @nr: number of entries
 * @entries: array of entries
 */
struct auxtrace_index {
	struct list_head	list;
	size_t			nr;
	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};
2015-04-09 18:53:48 +03:00
/**
* struct auxtrace - session callbacks to allow AUX area data decoding .
* @ process_event : lets the decoder see all session events
2018-03-06 12:13:14 +03:00
* @ process_auxtrace_event : process a PERF_RECORD_AUXTRACE event
2019-11-15 15:42:21 +03:00
* @ queue_data : queue an AUX sample or PERF_RECORD_AUXTRACE event for later
* processing
2019-11-15 15:42:19 +03:00
* @ dump_auxtrace_sample : dump AUX area sample data
2015-04-09 18:53:48 +03:00
* @ flush_events : process any remaining data
* @ free_events : free resources associated with event processing
* @ free : free resources associated with the session
*/
struct auxtrace {
int ( * process_event ) ( struct perf_session * session ,
union perf_event * event ,
struct perf_sample * sample ,
struct perf_tool * tool ) ;
2015-04-09 18:53:53 +03:00
int ( * process_auxtrace_event ) ( struct perf_session * session ,
union perf_event * event ,
struct perf_tool * tool ) ;
2019-11-15 15:42:21 +03:00
int ( * queue_data ) ( struct perf_session * session ,
struct perf_sample * sample , union perf_event * event ,
u64 data_offset ) ;
2019-11-15 15:42:19 +03:00
void ( * dump_auxtrace_sample ) ( struct perf_session * session ,
struct perf_sample * sample ) ;
2015-04-09 18:53:48 +03:00
int ( * flush_events ) ( struct perf_session * session ,
struct perf_tool * tool ) ;
void ( * free_events ) ( struct perf_session * session ) ;
void ( * free ) ( struct perf_session * session ) ;
2020-04-01 13:15:58 +03:00
bool ( * evsel_is_auxtrace ) ( struct perf_session * session ,
struct evsel * evsel ) ;
2015-04-09 18:53:48 +03:00
} ;
2015-04-21 12:21:51 +03:00
/**
* struct auxtrace_buffer - a buffer containing AUX area tracing data .
* @ list : buffers are queued in a list held by struct auxtrace_queue
* @ size : size of the buffer in bytes
* @ pid : in per - thread mode , the pid this buffer is associated with
* @ tid : in per - thread mode , the tid this buffer is associated with
* @ cpu : in per - cpu mode , the cpu this buffer is associated with
* @ data : actual buffer data ( can be null if the data has not been loaded )
* @ data_offset : file offset at which the buffer can be read
* @ mmap_addr : mmap address at which the buffer can be read
* @ mmap_size : size of the mmap at @ mmap_addr
* @ data_needs_freeing : @ data was malloc ' d so free it when it is no longer
* needed
* @ consecutive : the original data was split up and this buffer is consecutive
* to the previous buffer
* @ offset : offset as determined by aux_head / aux_tail members of struct
* perf_event_mmap_page
* @ reference : an implementation - specific reference determined when the data is
* recorded
* @ buffer_nr : used to number each buffer
* @ use_size : implementation actually only uses this number of bytes
* @ use_data : implementation actually only uses data starting at this address
*/
struct auxtrace_buffer {
struct list_head list ;
size_t size ;
pid_t pid ;
pid_t tid ;
2022-01-05 09:13:51 +03:00
struct perf_cpu cpu ;
2015-04-21 12:21:51 +03:00
void * data ;
off_t data_offset ;
void * mmap_addr ;
size_t mmap_size ;
bool data_needs_freeing ;
bool consecutive ;
u64 offset ;
u64 reference ;
u64 buffer_nr ;
size_t use_size ;
void * use_data ;
} ;
/**
 * struct auxtrace_queue - a queue of AUX area tracing data buffers.
 * @head: head of buffer list
 * @tid: in per-thread mode, the tid this queue is associated with
 * @cpu: in per-cpu mode, the cpu this queue is associated with
 * @set: %true once this queue has been dedicated to a specific thread or cpu
 * @priv: implementation-specific data
 */
struct auxtrace_queue {
	struct list_head	head;
	pid_t			tid;
	int			cpu;
	bool			set;
	void			*priv;
};
/**
 * struct auxtrace_queues - an array of AUX area tracing queues.
 * @queue_array: array of queues
 * @nr_queues: number of queues
 * @new_data: set whenever new data is queued
 * @populated: queues have been fully populated using the auxtrace_index
 * @next_buffer_nr: used to number each buffer
 */
struct auxtrace_queues {
	struct auxtrace_queue	*queue_array;
	unsigned int		nr_queues;
	bool			new_data;
	bool			populated;
	u64			next_buffer_nr;
};
2015-04-09 18:53:52 +03:00
/**
 * struct auxtrace_heap_item - element of struct auxtrace_heap.
 * @queue_nr: queue number
 * @ordinal: value used for sorting (lowest ordinal is top of the heap)
 *           expected to be a timestamp
 */
struct auxtrace_heap_item {
	unsigned int	queue_nr;
	u64		ordinal;
};
/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 * @heap_array: the heap
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 */
struct auxtrace_heap {
	struct auxtrace_heap_item	*heap_array;
	unsigned int			heap_cnt;
	unsigned int			heap_sz;
};
2015-04-09 18:53:42 +03:00
/**
 * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
 * @base: address of mapped area
 * @userpg: pointer to buffer's perf_event_mmap_page
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @len: size of mapped area
 * @prev: previous aux_head
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a
 *       per-cpu mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap {
	void	*base;
	void	*userpg;
	size_t	mask;
	size_t	len;
	u64	prev;
	int	idx;
	pid_t	tid;
	int	cpu;
};
/**
* struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap .
* @ mask : % 0 if @ len is not a power of two , otherwise ( @ len - % 1 )
* @ offset : file offset of mapped area
* @ len : size of mapped area
* @ prot : mmap memory protection
* @ idx : index of this mmap
* @ tid : tid for a per - thread mmap ( also set if there is only 1 tid on a per - cpu
* mmap ) otherwise % 0
* @ cpu : cpu number for a per - cpu mmap otherwise % - 1
*/
struct auxtrace_mmap_params {
size_t mask ;
off_t offset ;
size_t len ;
int prot ;
int idx ;
pid_t tid ;
2022-01-05 09:13:51 +03:00
struct perf_cpu cpu ;
2015-04-09 18:53:42 +03:00
} ;
2015-04-09 18:53:44 +03:00
/**
* struct auxtrace_record - callbacks for recording AUX area data .
* @ recording_options : validate and process recording options
* @ info_priv_size : return the size of the private data in auxtrace_info_event
* @ info_fill : fill - in the private data in auxtrace_info_event
* @ free : free this auxtrace record structure
2015-04-30 17:37:31 +03:00
* @ snapshot_start : starting a snapshot
* @ snapshot_finish : finishing a snapshot
* @ find_snapshot : find data to snapshot within auxtrace mmap
* @ parse_snapshot_options : parse snapshot options
2015-04-09 18:53:44 +03:00
* @ reference : provide a 64 - bit reference number for auxtrace_event
* @ read_finish : called after reading from an auxtrace mmap
2018-03-06 12:13:14 +03:00
* @ alignment : alignment ( if any ) for AUX area data
2019-11-15 15:42:15 +03:00
* @ default_aux_sample_size : default sample size for - - aux sample option
2020-02-17 11:23:00 +03:00
* @ pmu : associated pmu
* @ evlist : selected events list
2015-04-09 18:53:44 +03:00
*/
struct auxtrace_record {
int ( * recording_options ) ( struct auxtrace_record * itr ,
2019-07-21 14:23:52 +03:00
struct evlist * evlist ,
2015-04-09 18:53:44 +03:00
struct record_opts * opts ) ;
2016-01-15 00:46:15 +03:00
size_t ( * info_priv_size ) ( struct auxtrace_record * itr ,
2019-07-21 14:23:52 +03:00
struct evlist * evlist ) ;
2015-04-09 18:53:44 +03:00
int ( * info_fill ) ( struct auxtrace_record * itr ,
struct perf_session * session ,
2019-08-28 16:57:16 +03:00
struct perf_record_auxtrace_info * auxtrace_info ,
2015-04-09 18:53:44 +03:00
size_t priv_size ) ;
void ( * free ) ( struct auxtrace_record * itr ) ;
2015-04-30 17:37:31 +03:00
int ( * snapshot_start ) ( struct auxtrace_record * itr ) ;
int ( * snapshot_finish ) ( struct auxtrace_record * itr ) ;
int ( * find_snapshot ) ( struct auxtrace_record * itr , int idx ,
struct auxtrace_mmap * mm , unsigned char * data ,
u64 * head , u64 * old ) ;
int ( * parse_snapshot_options ) ( struct auxtrace_record * itr ,
struct record_opts * opts ,
const char * str ) ;
2015-04-09 18:53:44 +03:00
u64 ( * reference ) ( struct auxtrace_record * itr ) ;
int ( * read_finish ) ( struct auxtrace_record * itr , int idx ) ;
2015-05-29 16:33:38 +03:00
unsigned int alignment ;
2019-11-15 15:42:15 +03:00
unsigned int default_aux_sample_size ;
2020-02-17 11:23:00 +03:00
struct perf_pmu * pmu ;
struct evlist * evlist ;
2015-04-09 18:53:44 +03:00
} ;
2016-09-23 17:38:39 +03:00
/**
 * struct addr_filter - address filter.
 * @list: list node
 * @range: true if it is a range filter
 * @start: true if action is 'filter' or 'start'
 * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
 *          to 'stop')
 * @sym_from: symbol name for the filter address
 * @sym_to: symbol name that determines the filter size
 * @sym_from_idx: selects n'th from symbols with the same name (0 means global
 *                and less than 0 means symbol must be unique)
 * @sym_to_idx: same as @sym_from_idx but for @sym_to
 * @addr: filter address
 * @size: filter region size (for range filters)
 * @filename: DSO file name or NULL for the kernel
 * @str: allocated string that contains the other string members
 */
struct addr_filter {
	struct list_head	list;
	bool			range;
	bool			start;
	const char		*action;
	const char		*sym_from;
	const char		*sym_to;
	int			sym_from_idx;
	int			sym_to_idx;
	u64			addr;
	u64			size;
	const char		*filename;
	char			*str;
};
/**
 * struct addr_filters - list of address filters.
 * @head: list of address filters
 * @cnt: number of address filters
 */
struct addr_filters {
	struct list_head	head;
	int			cnt;
};
2019-08-30 20:45:20 +03:00
struct auxtrace_cache ;
2015-04-30 17:37:27 +03:00
# ifdef HAVE_AUXTRACE_SUPPORT
/*
 * In compat mode (64-bit kernel, 32-bit perf), 64-bit loads/stores of the
 * AUX head/tail are not guaranteed to be atomic in userspace, so these
 * helpers are used instead.  They are weak so an arch that can do atomic
 * 64-bit accesses in compat mode may override them.
 * compat_auxtrace_mmap__write_tail() returns -1 if the tail cannot be
 * handled (e.g. exceeds 4GB), telling the caller to bail out.
 */
u64 compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm);
int compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail);

/*
 * Read the AUX buffer head pointer from the mmap'd perf_event_mmap_page,
 * going through the compat helper when a 32-bit perf runs on a 64-bit
 * kernel (only relevant when BITS_PER_LONG == 32).
 */
static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm,
					   int kernel_is_64_bit __maybe_unused)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 head;

#if BITS_PER_LONG == 32
	if (kernel_is_64_bit)
		return compat_auxtrace_mmap__read_head(mm);
#endif
	head = READ_ONCE(pc->aux_head);

	/* Ensure all reads are done after we read the head */
	smp_rmb();
	return head;
}
perf auxtrace: Add compat_auxtrace_mmap__{read_head|write_tail}
When perf runs in compat mode (kernel in 64-bit mode and the perf is in
32-bit mode), the 64-bit value atomicity in the user space cannot be
assured, E.g. on some architectures, the 64-bit value accessing is split
into two instructions, one is for the low 32-bit word accessing and
another is for the high 32-bit word.
This patch introduces weak functions compat_auxtrace_mmap__read_head()
and compat_auxtrace_mmap__write_tail(), as their naming indicates, when
perf tool works in compat mode, it uses these two functions to access
the AUX head and tail. These two functions can allow the perf tool to
work properly in certain conditions, e.g. when perf tool works in
snapshot mode with only using AUX head pointer, or perf tool uses the
AUX buffer and the incremented tail is not bigger than 4GB.
When perf tool cannot handle the case when the AUX tail is bigger than
4GB, the function compat_auxtrace_mmap__write_tail() returns -1 and
tells the caller to bail out for the error.
These two functions are declared as weak attribute, this allows to
implement arch specific functions if any arch can support the 64-bit
value atomicity in compat mode.
Suggested-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: John Garry <john.garry@huawei.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Russell King (oracle)" <linux@armlinux.org.uk>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lore.kernel.org/lkml/20210829102238.19693-2-leo.yan@linaro.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2021-08-29 13:22:37 +03:00
/*
 * auxtrace_mmap__write_tail - publish the consumer (tail) position of the
 * AUX area ring buffer back to the kernel.
 * @mm: AUX area mmap
 * @tail: new tail offset
 * @kernel_is_64_bit: non-zero when 32-bit perf runs on a 64-bit kernel
 *
 * On 32-bit builds a plain 64-bit store is not guaranteed to be atomic, so
 * when the kernel is 64-bit the store is delegated to
 * compat_auxtrace_mmap__write_tail(), which may fail (returns -1) when the
 * tail value cannot be handled in compat mode (e.g. tail exceeds 4GB).
 *
 * Return: 0 on success, negative on error from the compat helper.
 */
static inline int auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail,
					    int kernel_is_64_bit __maybe_unused)
{
	struct perf_event_mmap_page *pc = mm->userpg;

#if BITS_PER_LONG == 32
	if (kernel_is_64_bit)
		return compat_auxtrace_mmap__write_tail(mm, tail);
#endif
	/* Ensure all reads are done before we write the tail out */
	smp_mb();
	WRITE_ONCE(pc->aux_tail, tail);
	return 0;
}
int auxtrace_mmap__mmap ( struct auxtrace_mmap * mm ,
struct auxtrace_mmap_params * mp ,
void * userpg , int fd ) ;
void auxtrace_mmap__munmap ( struct auxtrace_mmap * mm ) ;
void auxtrace_mmap_params__init ( struct auxtrace_mmap_params * mp ,
off_t auxtrace_offset ,
unsigned int auxtrace_pages ,
bool auxtrace_overwrite ) ;
void auxtrace_mmap_params__set_idx ( struct auxtrace_mmap_params * mp ,
2019-07-21 14:23:52 +03:00
struct evlist * evlist , int idx ,
2015-04-09 18:53:42 +03:00
bool per_cpu ) ;
2015-04-09 18:53:44 +03:00
typedef int ( * process_auxtrace_t ) ( struct perf_tool * tool ,
2019-07-27 21:30:53 +03:00
struct mmap * map ,
2015-04-09 18:53:44 +03:00
union perf_event * event , void * data1 ,
size_t len1 , void * data2 , size_t len2 ) ;
2019-07-27 21:30:53 +03:00
int auxtrace_mmap__read ( struct mmap * map , struct auxtrace_record * itr ,
2015-04-09 18:53:44 +03:00
struct perf_tool * tool , process_auxtrace_t fn ) ;
2019-07-27 21:30:53 +03:00
int auxtrace_mmap__read_snapshot ( struct mmap * map ,
2015-04-30 17:37:31 +03:00
struct auxtrace_record * itr ,
struct perf_tool * tool , process_auxtrace_t fn ,
size_t snapshot_size ) ;
2015-04-21 12:21:51 +03:00
int auxtrace_queues__init ( struct auxtrace_queues * queues ) ;
int auxtrace_queues__add_event ( struct auxtrace_queues * queues ,
struct perf_session * session ,
union perf_event * event , off_t data_offset ,
struct auxtrace_buffer * * buffer_ptr ) ;
2019-11-15 15:42:21 +03:00
struct auxtrace_queue *
auxtrace_queues__sample_queue ( struct auxtrace_queues * queues ,
struct perf_sample * sample ,
struct perf_session * session ) ;
int auxtrace_queues__add_sample ( struct auxtrace_queues * queues ,
struct perf_session * session ,
struct perf_sample * sample , u64 data_offset ,
u64 reference ) ;
2015-04-21 12:21:51 +03:00
void auxtrace_queues__free ( struct auxtrace_queues * queues ) ;
2015-04-30 17:37:25 +03:00
int auxtrace_queues__process_index ( struct auxtrace_queues * queues ,
struct perf_session * session ) ;
2019-11-15 15:42:21 +03:00
int auxtrace_queue_data ( struct perf_session * session , bool samples ,
bool events ) ;
2015-04-21 12:21:51 +03:00
struct auxtrace_buffer * auxtrace_buffer__next ( struct auxtrace_queue * queue ,
struct auxtrace_buffer * buffer ) ;
2021-04-30 10:03:03 +03:00
void * auxtrace_buffer__get_data_rw ( struct auxtrace_buffer * buffer , int fd , bool rw ) ;
/*
 * Read-only accessor for an auxtrace buffer's data; thin wrapper around
 * auxtrace_buffer__get_data_rw() with rw == false.
 */
static inline void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	return auxtrace_buffer__get_data_rw(buffer, fd, false);
}
2015-04-21 12:21:51 +03:00
void auxtrace_buffer__put_data ( struct auxtrace_buffer * buffer ) ;
void auxtrace_buffer__drop_data ( struct auxtrace_buffer * buffer ) ;
void auxtrace_buffer__free ( struct auxtrace_buffer * buffer ) ;
2015-04-09 18:53:52 +03:00
int auxtrace_heap__add ( struct auxtrace_heap * heap , unsigned int queue_nr ,
u64 ordinal ) ;
void auxtrace_heap__pop ( struct auxtrace_heap * heap ) ;
void auxtrace_heap__free ( struct auxtrace_heap * heap ) ;
2015-04-09 18:53:54 +03:00
struct auxtrace_cache_entry {
struct hlist_node hash ;
u32 key ;
} ;
struct auxtrace_cache * auxtrace_cache__new ( unsigned int bits , size_t entry_size ,
unsigned int limit_percent ) ;
void auxtrace_cache__free ( struct auxtrace_cache * auxtrace_cache ) ;
void * auxtrace_cache__alloc_entry ( struct auxtrace_cache * c ) ;
void auxtrace_cache__free_entry ( struct auxtrace_cache * c , void * entry ) ;
int auxtrace_cache__add ( struct auxtrace_cache * c , u32 key ,
struct auxtrace_cache_entry * entry ) ;
2019-10-25 15:59:59 +03:00
void auxtrace_cache__remove ( struct auxtrace_cache * c , u32 key ) ;
2015-04-09 18:53:54 +03:00
void * auxtrace_cache__lookup ( struct auxtrace_cache * c , u32 key ) ;
2019-07-21 14:23:52 +03:00
struct auxtrace_record * auxtrace_record__init ( struct evlist * evlist ,
2015-04-09 18:53:44 +03:00
int * err ) ;
2015-04-30 17:37:31 +03:00
int auxtrace_parse_snapshot_options ( struct auxtrace_record * itr ,
struct record_opts * opts ,
const char * str ) ;
2019-11-15 15:42:15 +03:00
int auxtrace_parse_sample_options ( struct auxtrace_record * itr ,
struct evlist * evlist ,
struct record_opts * opts , const char * str ) ;
2021-01-21 17:04:18 +03:00
void auxtrace_regroup_aux_output ( struct evlist * evlist ) ;
2015-04-09 18:53:44 +03:00
int auxtrace_record__options ( struct auxtrace_record * itr ,
2019-07-21 14:23:52 +03:00
struct evlist * evlist ,
2015-04-09 18:53:44 +03:00
struct record_opts * opts ) ;
2016-01-15 00:46:15 +03:00
size_t auxtrace_record__info_priv_size ( struct auxtrace_record * itr ,
2019-07-21 14:23:52 +03:00
struct evlist * evlist ) ;
2015-04-09 18:53:44 +03:00
int auxtrace_record__info_fill ( struct auxtrace_record * itr ,
struct perf_session * session ,
2019-08-28 16:57:16 +03:00
struct perf_record_auxtrace_info * auxtrace_info ,
2015-04-09 18:53:44 +03:00
size_t priv_size ) ;
void auxtrace_record__free ( struct auxtrace_record * itr ) ;
2015-04-30 17:37:31 +03:00
int auxtrace_record__snapshot_start ( struct auxtrace_record * itr ) ;
2019-08-06 17:41:01 +03:00
int auxtrace_record__snapshot_finish ( struct auxtrace_record * itr , bool on_exit ) ;
2015-04-30 17:37:31 +03:00
int auxtrace_record__find_snapshot ( struct auxtrace_record * itr , int idx ,
struct auxtrace_mmap * mm ,
unsigned char * data , u64 * head , u64 * old ) ;
2015-04-09 18:53:44 +03:00
u64 auxtrace_record__reference ( struct auxtrace_record * itr ) ;
2020-02-17 11:23:00 +03:00
int auxtrace_record__read_finish ( struct auxtrace_record * itr , int idx ) ;
2015-04-09 18:53:44 +03:00
2015-04-30 17:37:25 +03:00
int auxtrace_index__auxtrace_event ( struct list_head * head , union perf_event * event ,
off_t file_offset ) ;
int auxtrace_index__write ( int fd , struct list_head * head ) ;
int auxtrace_index__process ( int fd , u64 size , struct perf_session * session ,
bool needs_swap ) ;
void auxtrace_index__free ( struct list_head * head ) ;
2019-08-28 16:57:16 +03:00
void auxtrace_synth_error ( struct perf_record_auxtrace_error * auxtrace_error , int type ,
2015-04-09 18:53:50 +03:00
int code , int cpu , pid_t pid , pid_t tid , u64 ip ,
2019-02-06 13:39:47 +03:00
const char * msg , u64 timestamp ) ;
2015-04-09 18:53:50 +03:00
2018-09-13 15:54:03 +03:00
int perf_event__process_auxtrace_info ( struct perf_session * session ,
union perf_event * event ) ;
2018-09-13 15:54:04 +03:00
s64 perf_event__process_auxtrace ( struct perf_session * session ,
union perf_event * event ) ;
2018-09-13 15:54:03 +03:00
int perf_event__process_auxtrace_error ( struct perf_session * session ,
union perf_event * event ) ;
2021-05-30 22:23:03 +03:00
int itrace_do_parse_synth_opts ( struct itrace_synth_opts * synth_opts ,
const char * str , int unset ) ;
2015-04-09 18:53:49 +03:00
int itrace_parse_synth_opts ( const struct option * opt , const char * str ,
int unset ) ;
2018-09-20 21:05:37 +03:00
void itrace_synth_opts__set_default ( struct itrace_synth_opts * synth_opts ,
bool no_sample ) ;
2015-04-09 18:53:44 +03:00
2015-04-09 18:53:50 +03:00
size_t perf_event__fprintf_auxtrace_error ( union perf_event * event , FILE * fp ) ;
void perf_session__auxtrace_error_inc ( struct perf_session * session ,
union perf_event * event ) ;
void events_stats__auxtrace_error_warn ( const struct events_stats * stats ) ;
2016-09-23 17:38:39 +03:00
void addr_filters__init ( struct addr_filters * filts ) ;
void addr_filters__exit ( struct addr_filters * filts ) ;
int addr_filters__parse_bare_filter ( struct addr_filters * filts ,
const char * filter ) ;
2019-07-21 14:23:52 +03:00
int auxtrace_parse_filters ( struct evlist * evlist ) ;
2016-09-23 17:38:39 +03:00
2019-08-30 20:45:20 +03:00
int auxtrace__process_event ( struct perf_session * session , union perf_event * event ,
struct perf_sample * sample , struct perf_tool * tool ) ;
2019-11-15 15:42:19 +03:00
void auxtrace__dump_auxtrace_sample ( struct perf_session * session ,
struct perf_sample * sample ) ;
2019-08-30 20:45:20 +03:00
int auxtrace__flush_events ( struct perf_session * session , struct perf_tool * tool ) ;
void auxtrace__free_events ( struct perf_session * session ) ;
void auxtrace__free ( struct perf_session * session ) ;
2020-04-01 13:15:58 +03:00
bool auxtrace__evsel_is_auxtrace ( struct perf_session * session ,
struct evsel * evsel ) ;
2015-04-09 18:53:48 +03:00
2018-09-14 06:10:31 +03:00
/* Help text for the --itrace option (shared by perf report/script/etc.). */
#define ITRACE_HELP \
"				i[period]:    		synthesize instructions events\n" \
"				b:	    		synthesize branches events (branch misses for Arm SPE)\n" \
"				c:	    		synthesize branches events (calls only)\n" \
"				r:	    		synthesize branches events (returns only)\n" \
"				x:	    		synthesize transactions events\n" \
"				w:	    		synthesize ptwrite events\n" \
"				p:	    		synthesize power events\n" \
"				o:			synthesize other events recorded due to the use\n" \
"							of aux-output (refer to perf record)\n" \
"				I:			synthesize interrupt or similar (asynchronous) events\n" \
"							(e.g. Intel PT Event Trace)\n" \
"				e[flags]:		synthesize error events\n" \
"							each flag must be preceded by + or -\n" \
"							error flags are: o (overflow)\n" \
"									 l (data lost)\n" \
"				d[flags]:		create a debug log\n" \
"							each flag must be preceded by + or -\n" \
"							log flags are: a (all perf events)\n" \
"							               o (output to stdout)\n" \
"				f:	    		synthesize first level cache events\n" \
"				m:	    		synthesize last level cache events\n" \
"				t:	    		synthesize TLB events\n" \
"				a:	    		synthesize remote access events\n" \
"				g[len]:     		synthesize a call chain (use with i or x)\n" \
"				G[len]:			synthesize a call chain on existing event records\n" \
"				l[len]:     		synthesize last branch entries (use with i or x)\n" \
"				L[len]:			synthesize last branch entries on existing event records\n" \
"				sNUMBER:    		skip initial number of events\n" \
"				q:			quicker (less detailed) decoding\n" \
"				A:			approximate IPC\n" \
"				Z:			prefer to ignore timestamps (so-called \"timeless\" decoding)\n" \
"				PERIOD[ns|us|ms|i|t]:   specify period to sample stream\n" \
"				concatenate multiple options. Default is ibxwpe or cewp\n"
2019-06-04 15:59:59 +03:00
static inline
void itrace_synth_opts__set_time_range ( struct itrace_synth_opts * opts ,
struct perf_time_interval * ptime_range ,
int range_num )
{
opts - > ptime_range = ptime_range ;
opts - > range_num = range_num ;
}
static inline
void itrace_synth_opts__clear_time_range ( struct itrace_synth_opts * opts )
{
opts - > ptime_range = NULL ;
opts - > range_num = 0 ;
}
2018-09-14 06:10:31 +03:00
2015-04-30 17:37:27 +03:00
# else
2019-08-29 21:56:40 +03:00
# include "debug.h"
2015-04-30 17:37:27 +03:00
/*
 * Stub implementations used when perf is built without AUX area tracing
 * support: parsing an itrace/auxtrace option string reports an error, all
 * other operations are no-ops returning "nothing to do" values.
 */
static inline struct auxtrace_record *
auxtrace_record__init(struct evlist *evlist __maybe_unused,
		      int *err)
{
	*err = 0;
	return NULL;
}

static inline
void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}

static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     struct record_opts *opts __maybe_unused)
{
	return 0;
}

static inline
int perf_event__process_auxtrace_info(struct perf_session *session __maybe_unused,
				      union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
s64 perf_event__process_auxtrace(struct perf_session *session __maybe_unused,
				 union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
int perf_event__process_auxtrace_error(struct perf_session *session __maybe_unused,
				       union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
void perf_session__auxtrace_error_inc(struct perf_session *session
				      __maybe_unused,
				      union perf_event *event
				      __maybe_unused)
{
}

static inline
void events_stats__auxtrace_error_warn(const struct events_stats *stats
				       __maybe_unused)
{
}

/* Explicit use of an itrace option is an error without auxtrace support. */
static inline
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts __maybe_unused,
			       const char *str __maybe_unused, int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
			    const char *str __maybe_unused,
			    int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

/* A NULL option string means the option was not used - not an error. */
static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
				    struct record_opts *opts __maybe_unused,
				    const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
				  struct evlist *evlist __maybe_unused,
				  struct record_opts *opts __maybe_unused,
				  const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
void auxtrace_regroup_aux_output(struct evlist *evlist __maybe_unused)
{
}

static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
				    struct perf_sample *sample __maybe_unused)
{
}

static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
			   struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}

static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}

static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}

static inline
int auxtrace_index__write(int fd __maybe_unused,
			  struct list_head *head __maybe_unused)
{
	return -EINVAL;
}

static inline
int auxtrace_index__process(int fd __maybe_unused,
			    u64 size __maybe_unused,
			    struct perf_session *session __maybe_unused,
			    bool needs_swap __maybe_unused)
{
	return -EINVAL;
}

static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}

static inline
bool auxtrace__evsel_is_auxtrace(struct perf_session *session __maybe_unused,
				 struct evsel *evsel __maybe_unused)
{
	return false;
}

static inline
int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
{
	return 0;
}

/* The mmap helpers have real definitions in auxtrace.c in both builds. */
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu);

#define ITRACE_HELP ""

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
				       __maybe_unused,
				       struct perf_time_interval *ptime_range
				       __maybe_unused,
				       int range_num __maybe_unused)
{
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
					 __maybe_unused)
{
}
2015-04-30 17:37:27 +03:00
# endif
2015-04-09 18:53:42 +03:00
# endif