2019-07-21 13:24:21 +02:00
// SPDX-License-Identifier: GPL-2.0
# include <perf/evlist.h>
2019-07-21 13:24:55 +02:00
# include <perf/evsel.h>
2019-09-03 11:01:04 +02:00
# include <linux/bitops.h>
2019-07-21 13:24:21 +02:00
# include <linux/list.h>
2019-09-03 11:01:04 +02:00
# include <linux/hash.h>
2019-09-03 11:19:56 +02:00
# include <sys/ioctl.h>
2019-07-21 13:24:21 +02:00
# include <internal/evlist.h>
2019-07-21 13:24:26 +02:00
# include <internal/evsel.h>
2019-09-03 11:01:04 +02:00
# include <internal/xyarray.h>
2019-10-07 14:53:21 +02:00
# include <internal/mmap.h>
# include <internal/cpumap.h>
# include <internal/threadmap.h>
# include <internal/xyarray.h>
# include <internal/lib.h>
2019-07-21 13:24:32 +02:00
# include <linux/zalloc.h>
2019-10-07 14:53:21 +02:00
# include <sys/ioctl.h>
2019-07-21 13:24:35 +02:00
# include <stdlib.h>
2019-09-03 11:19:56 +02:00
# include <errno.h>
# include <unistd.h>
2019-08-16 16:00:45 +02:00
# include <fcntl.h>
# include <signal.h>
# include <poll.h>
2019-10-07 14:53:21 +02:00
# include <sys/mman.h>
2019-07-21 13:24:43 +02:00
# include <perf/cpumap.h>
# include <perf/threadmap.h>
2019-08-06 13:21:53 +02:00
# include <api/fd/array.h>
libperf: Add perf_evlist__init() function
Add the perf_evlist__init() function to initialize a perf_evlist struct.
Committer testing:
Fix a change in init ordering that was causing this backtrace:
(gdb) run stat sleep 1
Starting program: /root/bin/perf stat sleep 1
Program received signal SIGSEGV, Segmentation fault.
0x00000000004f6b55 in __perf_evlist__propagate_maps (evlist=0xbb34c0, evsel=0x0) at util/evlist.c:161
161 if (!evsel->own_cpus || evlist->has_user_cpus) {
Missing separate debuginfos, use: dnf debuginfo-install bzip2-libs-1.0.6-29.fc30.x86_64 elfutils-libelf-0.176-3.fc30.x86_64 elfutils-libs-0.176-3.fc30.x86_64 glib2-2.60.4-1.fc30.x86_64 libbabeltrace-1.5.6-2.fc30.x86_64 libgcc-9.1.1-1.fc30.x86_64 libunwind-1.3.1-2.fc30.x86_64 libuuid-2.33.2-1.fc30.x86_64 libxcrypt-4.4.6-2.fc30.x86_64 libzstd-1.4.0-1.fc30.x86_64 numactl-libs-2.0.12-2.fc30.x86_64 pcre-8.43-2.fc30.x86_64 perl-libs-5.28.2-436.fc30.x86_64 popt-1.16-17.fc30.x86_64 python2-libs-2.7.16-2.fc30.x86_64 slang-2.3.2-5.fc30.x86_64 xz-libs-5.2.4-5.fc30.x86_64 zlib-1.2.11-15.fc30.x86_64
(gdb) bt
#0 0x00000000004f6b55 in __perf_evlist__propagate_maps (evlist=0xbb34c0, evsel=0x0) at util/evlist.c:161
#1 0x00000000004f6c7a in perf_evlist__propagate_maps (evlist=0xbb34c0) at util/evlist.c:178
#2 0x00000000004f955e in perf_evlist__set_maps (evlist=0xbb34c0, cpus=0x0, threads=0x0) at util/evlist.c:1128
#3 0x00000000004f66f8 in evlist__init (evlist=0xbb34c0, cpus=0x0, threads=0x0) at util/evlist.c:52
#4 0x00000000004f6790 in evlist__new () at util/evlist.c:64
#5 0x0000000000456071 in cmd_stat (argc=3, argv=0x7fffffffd670) at builtin-stat.c:1705
#6 0x00000000004dd0fa in run_builtin (p=0xa21e00 <commands+288>, argc=3, argv=0x7fffffffd670) at perf.c:304
#7 0x00000000004dd367 in handle_internal_command (argc=3, argv=0x7fffffffd670) at perf.c:356
#8 0x00000000004dd4ae in run_argv (argcp=0x7fffffffd4cc, argv=0x7fffffffd4c0) at perf.c:400
#9 0x00000000004dd81a in main (argc=3, argv=0x7fffffffd670) at perf.c:522
(gdb) bt
So move the initialization of the core evlist (calling
perf_evlist__init()) to before perf_evlist__set_maps() in
evlist__init().
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190721112506.12306-39-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-07-21 13:24:25 +02:00
void perf_evlist__init ( struct perf_evlist * evlist )
{
2019-09-02 22:20:12 +02:00
int i ;
for ( i = 0 ; i < PERF_EVLIST__HLIST_SIZE ; + + i )
INIT_HLIST_HEAD ( & evlist - > heads [ i ] ) ;
libperf: Add perf_evlist__init() function
Add the perf_evlist__init() function to initialize a perf_evlist struct.
Committer testing:
Fix a change in init ordering that was causing this backtrace:
(gdb) run stat sleep 1
Starting program: /root/bin/perf stat sleep 1
Program received signal SIGSEGV, Segmentation fault.
0x00000000004f6b55 in __perf_evlist__propagate_maps (evlist=0xbb34c0, evsel=0x0) at util/evlist.c:161
161 if (!evsel->own_cpus || evlist->has_user_cpus) {
Missing separate debuginfos, use: dnf debuginfo-install bzip2-libs-1.0.6-29.fc30.x86_64 elfutils-libelf-0.176-3.fc30.x86_64 elfutils-libs-0.176-3.fc30.x86_64 glib2-2.60.4-1.fc30.x86_64 libbabeltrace-1.5.6-2.fc30.x86_64 libgcc-9.1.1-1.fc30.x86_64 libunwind-1.3.1-2.fc30.x86_64 libuuid-2.33.2-1.fc30.x86_64 libxcrypt-4.4.6-2.fc30.x86_64 libzstd-1.4.0-1.fc30.x86_64 numactl-libs-2.0.12-2.fc30.x86_64 pcre-8.43-2.fc30.x86_64 perl-libs-5.28.2-436.fc30.x86_64 popt-1.16-17.fc30.x86_64 python2-libs-2.7.16-2.fc30.x86_64 slang-2.3.2-5.fc30.x86_64 xz-libs-5.2.4-5.fc30.x86_64 zlib-1.2.11-15.fc30.x86_64
(gdb) bt
#0 0x00000000004f6b55 in __perf_evlist__propagate_maps (evlist=0xbb34c0, evsel=0x0) at util/evlist.c:161
#1 0x00000000004f6c7a in perf_evlist__propagate_maps (evlist=0xbb34c0) at util/evlist.c:178
#2 0x00000000004f955e in perf_evlist__set_maps (evlist=0xbb34c0, cpus=0x0, threads=0x0) at util/evlist.c:1128
#3 0x00000000004f66f8 in evlist__init (evlist=0xbb34c0, cpus=0x0, threads=0x0) at util/evlist.c:52
#4 0x00000000004f6790 in evlist__new () at util/evlist.c:64
#5 0x0000000000456071 in cmd_stat (argc=3, argv=0x7fffffffd670) at builtin-stat.c:1705
#6 0x00000000004dd0fa in run_builtin (p=0xa21e00 <commands+288>, argc=3, argv=0x7fffffffd670) at perf.c:304
#7 0x00000000004dd367 in handle_internal_command (argc=3, argv=0x7fffffffd670) at perf.c:356
#8 0x00000000004dd4ae in run_argv (argcp=0x7fffffffd4cc, argv=0x7fffffffd4c0) at perf.c:400
#9 0x00000000004dd81a in main (argc=3, argv=0x7fffffffd670) at perf.c:522
(gdb) bt
So move the initialization of the core evlist (calling
perf_evlist__init()) to before perf_evlist__set_maps() in
evlist__init().
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190721112506.12306-39-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-07-21 13:24:25 +02:00
INIT_LIST_HEAD ( & evlist - > entries ) ;
2019-07-21 13:24:28 +02:00
evlist - > nr_entries = 0 ;
libperf: Add perf_evlist__init() function
Add the perf_evlist__init() function to initialize a perf_evlist struct.
Committer testing:
Fix a change in init ordering that was causing this backtrace:
(gdb) run stat sleep 1
Starting program: /root/bin/perf stat sleep 1
Program received signal SIGSEGV, Segmentation fault.
0x00000000004f6b55 in __perf_evlist__propagate_maps (evlist=0xbb34c0, evsel=0x0) at util/evlist.c:161
161 if (!evsel->own_cpus || evlist->has_user_cpus) {
Missing separate debuginfos, use: dnf debuginfo-install bzip2-libs-1.0.6-29.fc30.x86_64 elfutils-libelf-0.176-3.fc30.x86_64 elfutils-libs-0.176-3.fc30.x86_64 glib2-2.60.4-1.fc30.x86_64 libbabeltrace-1.5.6-2.fc30.x86_64 libgcc-9.1.1-1.fc30.x86_64 libunwind-1.3.1-2.fc30.x86_64 libuuid-2.33.2-1.fc30.x86_64 libxcrypt-4.4.6-2.fc30.x86_64 libzstd-1.4.0-1.fc30.x86_64 numactl-libs-2.0.12-2.fc30.x86_64 pcre-8.43-2.fc30.x86_64 perl-libs-5.28.2-436.fc30.x86_64 popt-1.16-17.fc30.x86_64 python2-libs-2.7.16-2.fc30.x86_64 slang-2.3.2-5.fc30.x86_64 xz-libs-5.2.4-5.fc30.x86_64 zlib-1.2.11-15.fc30.x86_64
(gdb) bt
#0 0x00000000004f6b55 in __perf_evlist__propagate_maps (evlist=0xbb34c0, evsel=0x0) at util/evlist.c:161
#1 0x00000000004f6c7a in perf_evlist__propagate_maps (evlist=0xbb34c0) at util/evlist.c:178
#2 0x00000000004f955e in perf_evlist__set_maps (evlist=0xbb34c0, cpus=0x0, threads=0x0) at util/evlist.c:1128
#3 0x00000000004f66f8 in evlist__init (evlist=0xbb34c0, cpus=0x0, threads=0x0) at util/evlist.c:52
#4 0x00000000004f6790 in evlist__new () at util/evlist.c:64
#5 0x0000000000456071 in cmd_stat (argc=3, argv=0x7fffffffd670) at builtin-stat.c:1705
#6 0x00000000004dd0fa in run_builtin (p=0xa21e00 <commands+288>, argc=3, argv=0x7fffffffd670) at perf.c:304
#7 0x00000000004dd367 in handle_internal_command (argc=3, argv=0x7fffffffd670) at perf.c:356
#8 0x00000000004dd4ae in run_argv (argcp=0x7fffffffd4cc, argv=0x7fffffffd4c0) at perf.c:400
#9 0x00000000004dd81a in main (argc=3, argv=0x7fffffffd670) at perf.c:522
(gdb) bt
So move the initialization of the core evlist (calling
perf_evlist__init()) to before perf_evlist__set_maps() in
evlist__init().
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190721112506.12306-39-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-07-21 13:24:25 +02:00
}
2019-07-21 13:24:26 +02:00
2019-07-21 13:24:43 +02:00
static void __perf_evlist__propagate_maps ( struct perf_evlist * evlist ,
struct perf_evsel * evsel )
{
/*
* We already have cpus for evsel ( via PMU sysfs ) so
* keep it , if there ' s no target cpu list defined .
*/
if ( ! evsel - > own_cpus | | evlist - > has_user_cpus ) {
perf_cpu_map__put ( evsel - > cpus ) ;
evsel - > cpus = perf_cpu_map__get ( evlist - > cpus ) ;
} else if ( evsel - > cpus ! = evsel - > own_cpus ) {
perf_cpu_map__put ( evsel - > cpus ) ;
evsel - > cpus = perf_cpu_map__get ( evsel - > own_cpus ) ;
}
perf_thread_map__put ( evsel - > threads ) ;
evsel - > threads = perf_thread_map__get ( evlist - > threads ) ;
}
/* Propagate the evlist's cpu/thread maps to every evsel on the list. */
static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_evsel(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}
2019-07-21 13:24:26 +02:00
void perf_evlist__add ( struct perf_evlist * evlist ,
struct perf_evsel * evsel )
{
list_add_tail ( & evsel - > node , & evlist - > entries ) ;
2019-07-21 13:24:28 +02:00
evlist - > nr_entries + = 1 ;
2019-07-21 13:24:43 +02:00
__perf_evlist__propagate_maps ( evlist , evsel ) ;
2019-07-21 13:24:26 +02:00
}
2019-07-21 13:24:27 +02:00
void perf_evlist__remove ( struct perf_evlist * evlist ,
struct perf_evsel * evsel )
{
list_del_init ( & evsel - > node ) ;
2019-07-21 13:24:28 +02:00
evlist - > nr_entries - = 1 ;
2019-07-21 13:24:27 +02:00
}
2019-07-21 13:24:32 +02:00
/*
 * Allocate and initialize a new evlist.
 * Returns NULL on allocation failure; free with perf_evlist__delete().
 */
struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}
2019-07-21 13:24:34 +02:00
struct perf_evsel *
perf_evlist__next ( struct perf_evlist * evlist , struct perf_evsel * prev )
{
struct perf_evsel * next ;
if ( ! prev ) {
next = list_first_entry ( & evlist - > entries ,
struct perf_evsel ,
node ) ;
} else {
next = list_next_entry ( prev , node ) ;
}
/* Empty list is noticed here so don't need checking on entry. */
if ( & next - > node = = & evlist - > entries )
return NULL ;
return next ;
}
2019-07-21 13:24:35 +02:00
/*
 * Tear down the evlist's mmaps and free the evlist itself.
 * NULL-safe; does not free the evsels on the list.
 */
void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	free(evlist);
}
2019-07-21 13:24:43 +02:00
void perf_evlist__set_maps ( struct perf_evlist * evlist ,
struct perf_cpu_map * cpus ,
struct perf_thread_map * threads )
{
/*
* Allow for the possibility that one or another of the maps isn ' t being
* changed i . e . don ' t put it . Note we are assuming the maps that are
* being applied are brand new and evlist is taking ownership of the
* original reference count of 1. If that is not the case it is up to
* the caller to increase the reference count .
*/
if ( cpus ! = evlist - > cpus ) {
perf_cpu_map__put ( evlist - > cpus ) ;
evlist - > cpus = perf_cpu_map__get ( cpus ) ;
}
if ( threads ! = evlist - > threads ) {
perf_thread_map__put ( evlist - > threads ) ;
evlist - > threads = perf_thread_map__get ( threads ) ;
}
perf_evlist__propagate_maps ( evlist ) ;
}
2019-07-21 13:24:55 +02:00
/*
 * Open every evsel on the evlist on its cpu/thread maps.
 * On the first failure all already-opened evsels are closed and the
 * (negative) error is returned; returns 0 on success.
 */
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;

out_err:
	perf_evlist__close(evlist);
	return err;
}
/* Close every evsel on the evlist, in reverse order of opening. */
void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}
2019-07-21 13:24:56 +02:00
/* Enable (start counting on) every evsel on the evlist. */
void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__enable(evsel);
}
/* Disable (stop counting on) every evsel on the evlist. */
void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__disable(evsel);
}
2019-09-03 10:54:48 +02:00
u64 perf_evlist__read_format ( struct perf_evlist * evlist )
{
struct perf_evsel * first = perf_evlist__first ( evlist ) ;
return first - > attr . read_format ;
}
2019-09-03 11:01:04 +02:00
/* Look up the perf_sample_id slot for (evsel, cpu, thread). */
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

/*
 * Record the kernel-assigned event @id in the (cpu, thread) sample-id
 * slot of @evsel and link it into the evlist's id hash for fast
 * id -> evsel lookups.
 */
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}
/*
 * Hash @id into the evlist and append it to the evsel's id array.
 * NOTE(review): assumes evsel->id has room for evsel->ids + 1 entries.
 */
void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
2019-09-03 11:19:56 +02:00
int perf_evlist__id_add_fd ( struct perf_evlist * evlist ,
struct perf_evsel * evsel ,
int cpu , int thread , int fd )
{
u64 read_data [ 4 ] = { 0 , } ;
int id_idx = 1 ; /* The first entry is the counter value */
u64 id ;
int ret ;
ret = ioctl ( fd , PERF_EVENT_IOC_ID , & id ) ;
if ( ! ret )
goto add ;
if ( errno ! = ENOTTY )
return - 1 ;
/* Legacy way to get event id.. All hail to old kernels! */
/*
* This way does not work with group format read , so bail
* out in that case .
*/
if ( perf_evlist__read_format ( evlist ) & PERF_FORMAT_GROUP )
return - 1 ;
if ( ! ( evsel - > attr . read_format & PERF_FORMAT_ID ) | |
read ( fd , & read_data , sizeof ( read_data ) ) = = - 1 )
return - 1 ;
if ( evsel - > attr . read_format & PERF_FORMAT_TOTAL_TIME_ENABLED )
+ + id_idx ;
if ( evsel - > attr . read_format & PERF_FORMAT_TOTAL_TIME_RUNNING )
+ + id_idx ;
id = read_data [ id_idx ] ;
add :
perf_evlist__id_add ( evlist , evsel , cpu , thread , id ) ;
return 0 ;
}
2019-08-06 13:21:53 +02:00
int perf_evlist__alloc_pollfd ( struct perf_evlist * evlist )
{
int nr_cpus = perf_cpu_map__nr ( evlist - > cpus ) ;
int nr_threads = perf_thread_map__nr ( evlist - > threads ) ;
int nfds = 0 ;
struct perf_evsel * evsel ;
perf_evlist__for_each_entry ( evlist , evsel ) {
if ( evsel - > system_wide )
nfds + = nr_cpus ;
else
nfds + = nr_cpus * nr_threads ;
}
if ( fdarray__available_entries ( & evlist - > pollfd ) < nfds & &
fdarray__grow ( & evlist - > pollfd , nfds ) < 0 )
return - ENOMEM ;
return 0 ;
}
2019-08-16 16:00:45 +02:00
int perf_evlist__add_pollfd ( struct perf_evlist * evlist , int fd ,
void * ptr , short revent )
{
int pos = fdarray__add ( & evlist - > pollfd , fd , revent | POLLERR | POLLHUP ) ;
if ( pos > = 0 ) {
evlist - > pollfd . priv [ pos ] . ptr = ptr ;
fcntl ( fd , F_SETFL , O_NONBLOCK ) ;
}
return pos ;
}
2019-08-31 22:48:33 +02:00
int perf_evlist__poll ( struct perf_evlist * evlist , int timeout )
{
return fdarray__poll ( & evlist - > pollfd , timeout ) ;
}
2019-10-07 14:53:21 +02:00
static struct perf_mmap * perf_evlist__alloc_mmap ( struct perf_evlist * evlist , bool overwrite )
{
int i ;
struct perf_mmap * map ;
evlist - > nr_mmaps = perf_cpu_map__nr ( evlist - > cpus ) ;
if ( perf_cpu_map__empty ( evlist - > cpus ) )
evlist - > nr_mmaps = perf_thread_map__nr ( evlist - > threads ) ;
map = zalloc ( evlist - > nr_mmaps * sizeof ( struct perf_mmap ) ) ;
if ( ! map )
return NULL ;
for ( i = 0 ; i < evlist - > nr_mmaps ; i + + ) {
/*
* When the perf_mmap ( ) call is made we grab one refcount , plus
* one extra to let perf_mmap__consume ( ) get the last
* events after all real references ( perf_mmap__get ( ) ) are
* dropped .
*
* Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
* thus does perf_mmap__get ( ) on it .
*/
perf_mmap__init ( & map [ i ] , overwrite , NULL ) ;
}
return map ;
}
static void perf_evlist__set_sid_idx ( struct perf_evlist * evlist ,
struct perf_evsel * evsel , int idx , int cpu ,
int thread )
{
struct perf_sample_id * sid = SID ( evsel , cpu , thread ) ;
sid - > idx = idx ;
if ( evlist - > cpus & & cpu > = 0 )
sid - > cpu = evlist - > cpus - > map [ cpu ] ;
else
sid - > cpu = - 1 ;
if ( ! evsel - > system_wide & & evlist - > threads & & thread > = 0 )
sid - > tid = perf_thread_map__pid ( evlist - > threads , thread ) ;
else
sid - > tid = - 1 ;
}
static struct perf_mmap *
perf_evlist__map_get ( struct perf_evlist * evlist , bool overwrite , int idx )
{
struct perf_mmap * map = & evlist - > mmap [ idx ] ;
if ( overwrite ) {
if ( ! evlist - > mmap_ovw ) {
evlist - > mmap_ovw = perf_evlist__alloc_mmap ( evlist , true ) ;
if ( ! evlist - > mmap_ovw )
return NULL ;
}
map = & evlist - > mmap_ovw [ idx ] ;
}
return map ;
}
# define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
static int
mmap_per_evsel ( struct perf_evlist * evlist , int idx ,
struct perf_mmap_param * mp , int cpu_idx ,
int thread , int * _output , int * _output_overwrite )
{
int evlist_cpu = perf_cpu_map__cpu ( evlist - > cpus , cpu_idx ) ;
struct perf_evsel * evsel ;
int revent ;
perf_evlist__for_each_entry ( evlist , evsel ) {
bool overwrite = evsel - > attr . write_backward ;
struct perf_mmap * map ;
int * output , fd , cpu ;
if ( evsel - > system_wide & & thread )
continue ;
cpu = perf_cpu_map__idx ( evsel - > cpus , evlist_cpu ) ;
if ( cpu = = - 1 )
continue ;
map = perf_evlist__map_get ( evlist , overwrite , idx ) ;
if ( map = = NULL )
return - ENOMEM ;
if ( overwrite ) {
mp - > prot = PROT_READ ;
output = _output_overwrite ;
} else {
mp - > prot = PROT_READ | PROT_WRITE ;
output = _output ;
}
fd = FD ( evsel , cpu , thread ) ;
if ( * output = = - 1 ) {
* output = fd ;
if ( perf_mmap__mmap ( map , mp , * output , evlist_cpu ) < 0 )
return - 1 ;
} else {
if ( ioctl ( fd , PERF_EVENT_IOC_SET_OUTPUT , * output ) ! = 0 )
return - 1 ;
perf_mmap__get ( map ) ;
}
revent = ! overwrite ? POLLIN : 0 ;
if ( ! evsel - > system_wide & &
perf_evlist__add_pollfd ( evlist , fd , map , revent ) < 0 ) {
perf_mmap__put ( map ) ;
return - 1 ;
}
if ( evsel - > attr . read_format & PERF_FORMAT_ID ) {
if ( perf_evlist__id_add_fd ( evlist , evsel , cpu , thread ,
fd ) < 0 )
return - 1 ;
perf_evlist__set_sid_idx ( evlist , evsel , idx , cpu ,
thread ) ;
}
}
return 0 ;
}
static int
2019-10-07 14:53:23 +02:00
mmap_per_thread ( struct perf_evlist * evlist , struct perf_evlist_mmap_ops * ops ,
struct perf_mmap_param * mp )
2019-10-07 14:53:21 +02:00
{
int thread ;
int nr_threads = perf_thread_map__nr ( evlist - > threads ) ;
for ( thread = 0 ; thread < nr_threads ; thread + + ) {
int output = - 1 ;
int output_overwrite = - 1 ;
2019-10-07 14:53:23 +02:00
if ( ops - > idx )
ops - > idx ( evlist , mp , thread , false ) ;
2019-10-07 14:53:21 +02:00
if ( mmap_per_evsel ( evlist , thread , mp , 0 , thread ,
& output , & output_overwrite ) )
goto out_unmap ;
}
return 0 ;
out_unmap :
perf_evlist__munmap ( evlist ) ;
return - 1 ;
}
static int
2019-10-07 14:53:23 +02:00
mmap_per_cpu ( struct perf_evlist * evlist , struct perf_evlist_mmap_ops * ops ,
struct perf_mmap_param * mp )
2019-10-07 14:53:21 +02:00
{
int nr_threads = perf_thread_map__nr ( evlist - > threads ) ;
int nr_cpus = perf_cpu_map__nr ( evlist - > cpus ) ;
int cpu , thread ;
for ( cpu = 0 ; cpu < nr_cpus ; cpu + + ) {
int output = - 1 ;
int output_overwrite = - 1 ;
2019-10-07 14:53:23 +02:00
if ( ops - > idx )
ops - > idx ( evlist , mp , cpu , true ) ;
2019-10-07 14:53:21 +02:00
for ( thread = 0 ; thread < nr_threads ; thread + + ) {
if ( mmap_per_evsel ( evlist , cpu , mp , cpu ,
thread , & output , & output_overwrite ) )
goto out_unmap ;
}
}
return 0 ;
out_unmap :
perf_evlist__munmap ( evlist ) ;
return - 1 ;
}
2019-10-07 14:53:22 +02:00
int perf_evlist__mmap_ops ( struct perf_evlist * evlist ,
struct perf_evlist_mmap_ops * ops ,
struct perf_mmap_param * mp )
2019-10-07 14:53:21 +02:00
{
struct perf_evsel * evsel ;
const struct perf_cpu_map * cpus = evlist - > cpus ;
const struct perf_thread_map * threads = evlist - > threads ;
2019-10-07 14:53:22 +02:00
if ( ! ops )
return - EINVAL ;
2019-10-07 14:53:21 +02:00
if ( ! evlist - > mmap )
evlist - > mmap = perf_evlist__alloc_mmap ( evlist , false ) ;
if ( ! evlist - > mmap )
return - ENOMEM ;
perf_evlist__for_each_entry ( evlist , evsel ) {
if ( ( evsel - > attr . read_format & PERF_FORMAT_ID ) & &
evsel - > sample_id = = NULL & &
perf_evsel__alloc_id ( evsel , perf_cpu_map__nr ( cpus ) , threads - > nr ) < 0 )
return - ENOMEM ;
}
2019-10-07 14:53:22 +02:00
if ( perf_cpu_map__empty ( cpus ) )
2019-10-07 14:53:23 +02:00
return mmap_per_thread ( evlist , ops , mp ) ;
2019-10-07 14:53:22 +02:00
2019-10-07 14:53:23 +02:00
return mmap_per_cpu ( evlist , ops , mp ) ;
2019-10-07 14:53:22 +02:00
}
int perf_evlist__mmap ( struct perf_evlist * evlist , int pages )
{
struct perf_mmap_param mp ;
2019-10-07 14:53:23 +02:00
struct perf_evlist_mmap_ops ops = { 0 } ;
2019-10-07 14:53:22 +02:00
2019-10-07 14:53:21 +02:00
evlist - > mmap_len = ( pages + 1 ) * page_size ;
mp . mask = evlist - > mmap_len - page_size - 1 ;
2019-10-07 14:53:22 +02:00
return perf_evlist__mmap_ops ( evlist , & ops , & mp ) ;
2019-10-07 14:53:21 +02:00
}
void perf_evlist__munmap ( struct perf_evlist * evlist )
{
int i ;
if ( evlist - > mmap ) {
for ( i = 0 ; i < evlist - > nr_mmaps ; i + + )
perf_mmap__munmap ( & evlist - > mmap [ i ] ) ;
}
if ( evlist - > mmap_ovw ) {
for ( i = 0 ; i < evlist - > nr_mmaps ; i + + )
perf_mmap__munmap ( & evlist - > mmap_ovw [ i ] ) ;
}
zfree ( & evlist - > mmap ) ;
zfree ( & evlist - > mmap_ovw ) ;
}