/*
 * builtin-inject.c
 *
 * Builtin inject command: Examine the live mode (stdin) event stream
 * and repipe it to stdout while optionally injecting additional
 * events into it.
 */
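/*
 * A sketch of the usual pipeline this command sits in (illustrative, not
 * taken from this file): repipe the record stream from stdin to stdout
 * while injecting build-id events ('-b') so the consumer can resolve
 * symbols later, e.g.
 *
 *   perf record -o - -- <workload> | perf inject -b | perf report -i -
 */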
#include "builtin.h"
#include "perf.h"

#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
#include "util/build-id.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"

#include <subcmd/parse-options.h>

#include <linux/list.h>

struct perf_inject {
	struct perf_tool	tool;
	struct perf_session	*session;
	bool			build_ids;
	bool			sched_stat;
	bool			have_auxtrace;
	bool			strip;
	bool			jit_mode;
	const char		*input_name;
	struct perf_data_file	output;
	u64			bytes_written;
	u64			aux_id;
	struct list_head	samples;
	struct itrace_synth_opts itrace_synth_opts;
};

struct event_entry {
	struct list_head node;
	u32		 tid;
	union perf_event event[0];
};
2010-05-01 10:41:20 +04:00
2015-04-21 12:21:54 +03:00
static int output_bytes ( struct perf_inject * inject , void * buf , size_t sz )
2010-05-01 10:41:20 +04:00
{
2013-10-29 22:04:57 +04:00
ssize_t size ;
2010-05-01 10:41:20 +04:00
2015-04-21 12:21:54 +03:00
size = perf_data_file__write ( & inject - > output , buf , sz ) ;
2013-10-29 22:04:57 +04:00
if ( size < 0 )
return - errno ;
2010-05-01 10:41:20 +04:00
2013-10-29 22:04:57 +04:00
inject - > bytes_written + = size ;
2010-05-01 10:41:20 +04:00
return 0 ;
}
2015-04-21 12:21:54 +03:00
static int perf_event__repipe_synth ( struct perf_tool * tool ,
union perf_event * event )
{
struct perf_inject * inject = container_of ( tool , struct perf_inject ,
tool ) ;
return output_bytes ( inject , event , event - > header . size ) ;
}

static int perf_event__repipe_oe_synth(struct perf_tool *tool,
				       union perf_event *event,
				       struct ordered_events *oe __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_JITDUMP
static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
			       union perf_event *event __maybe_unused,
			       struct ordered_events *oe __maybe_unused)
{
	return 0;
}
#endif

static int perf_event__repipe_op2_synth(struct perf_tool *tool,
					union perf_event *event,
					struct perf_session *session
					__maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_attr(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_evlist **pevlist)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	ret = perf_event__process_attr(tool, event, pevlist);
	if (ret)
		return ret;

	if (!inject->output.is_pipe)
		return 0;

	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
{
	char buf[4096];
	ssize_t ssz;
	int ret;

	while (size > 0) {
		ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
		if (ssz < 0)
			return -errno;
		ret = output_bytes(inject, buf, ssz);
		if (ret)
			return ret;
		size -= ssz;
	}

	return 0;
}

static s64 perf_event__repipe_auxtrace(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_session *session)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	inject->have_auxtrace = true;

	if (!inject->output.is_pipe) {
		off_t offset;

		offset = lseek(inject->output.fd, 0, SEEK_CUR);
		if (offset == -1)
			return -errno;
		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
						     event, offset);
		if (ret < 0)
			return ret;
	}

	if (perf_data_file__is_pipe(session->file) || !session->one_mmap) {
		ret = output_bytes(inject, event, event->header.size);
		if (ret < 0)
			return ret;
		ret = copy_bytes(inject, perf_data_file__fd(session->file),
				 event->auxtrace.size);
	} else {
		ret = output_bytes(inject, event,
				   event->header.size + event->auxtrace.size);
	}
	if (ret < 0)
		return ret;

	return event->auxtrace.size;
}

#else

static s64
perf_event__repipe_auxtrace(struct perf_tool *tool __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_session *session __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

#endif
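
/*
 * For AUX area data (e.g. Intel PT), the intended flow is to decode the
 * trace later and replace it with synthesized events; a sketch, assuming
 * an Intel PT recording (event and file names illustrative):
 *
 *   perf record -e intel_pt//u -- <workload>
 *   perf inject --itrace -i perf.data -o perf.data.synth
 */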

static int perf_event__repipe(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

static int perf_event__drop(struct perf_tool *tool __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static int perf_event__drop_aux(struct perf_tool *tool,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample,
				struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	if (!inject->aux_id)
		inject->aux_id = sample->id;

	return 0;
}

typedef int (*inject_handler)(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct perf_evsel *evsel,
			      struct machine *machine);

static int perf_event__repipe_sample(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct perf_evsel *evsel,
				     struct machine *machine)
{
	if (evsel->handler) {
		inject_handler f = evsel->handler;
		return f(tool, event, sample, evsel, machine);
	}

	build_id__mark_dso_hit(tool, event, sample, evsel, machine);

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_mmap(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_mmap(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap.filename, sample->pid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap(tool, event, sample, machine);
}
#endif

static int perf_event__repipe_mmap2(struct perf_tool *tool,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)
{
	int err;

	err = perf_event__process_mmap2(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
					union perf_event *event,
					struct perf_sample *sample,
					struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap2.filename, sample->pid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap2(tool, event, sample, machine);
}
#endif
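
/*
 * A sketch of the jitdump workflow these handlers support (assumes the
 * workload emits a jitdump file via a JIT agent; clock option and file
 * names are illustrative):
 *
 *   perf record -k mono -- <jitted workload>
 *   perf inject --jit -i perf.data -o perf.data.jitted
 *   perf report -i perf.data.jitted
 */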

static int perf_event__repipe_fork(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_fork(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_comm(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_comm(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}
static int perf_event__repipe_namespaces(struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct machine *machine)
{
	int err = perf_event__process_namespaces(tool, event, sample, machine);

	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_exit(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_exit(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_tracing_data(struct perf_tool *tool,
					   union perf_event *event,
					   struct perf_session *session)
{
	int err;

	perf_event__repipe_synth(tool, event);
	err = perf_event__process_tracing_data(tool, event, session);

	return err;
}

static int perf_event__repipe_id_index(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_session *session)
{
	int err;

	perf_event__repipe_synth(tool, event);
	err = perf_event__process_id_index(tool, event, session);

	return err;
}

static int dso__read_build_id(struct dso *dso)
{
	if (dso->has_build_id)
		return 0;

	if (filename__read_build_id(dso->long_name, dso->build_id,
				    sizeof(dso->build_id)) > 0) {
		dso->has_build_id = true;
		return 0;
	}

	return -1;
}

static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine)
{
	u16 misc = PERF_RECORD_MISC_USER;
	int err;

	if (dso__read_build_id(dso) < 0) {
		pr_debug("no build_id found for %s\n", dso->long_name);
		return -1;
	}

	if (dso->kernel)
		misc = PERF_RECORD_MISC_KERNEL;

	err = perf_event__synthesize_build_id(tool, dso, misc, perf_event__repipe,
					      machine);
	if (err) {
		pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
		return -1;
	}

	return 0;
}

static int perf_event__inject_buildid(struct perf_tool *tool,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_evsel *evsel __maybe_unused,
				      struct machine *machine)
{
	struct addr_location al;
	struct thread *thread;

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL) {
		pr_err("problem processing %d event, skipping it.\n",
		       event->header.type);
		goto repipe;
	}

	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, &al);

	if (al.map != NULL) {
		if (!al.map->dso->hit) {
			al.map->dso->hit = 1;
			if (map__load(al.map) >= 0) {
				dso__inject_build_id(al.map->dso, tool, machine);
				/*
				 * If this fails, too bad, let the other side
				 * account this as unresolved.
				 */
			} else {
#ifdef HAVE_LIBELF_SUPPORT
				pr_warning("no symbols found in %s, maybe "
					   "install a debug package?\n",
					   al.map->dso->long_name);
#endif
			}
		}
	}
	thread__put(thread);
repipe:
	perf_event__repipe(tool, event, sample, machine);
	return 0;
}

static int perf_inject__sched_process_exit(struct perf_tool *tool,
					   union perf_event *event __maybe_unused,
					   struct perf_sample *sample,
					   struct perf_evsel *evsel __maybe_unused,
					   struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	list_for_each_entry(ent, &inject->samples, node) {
		if (sample->tid == ent->tid) {
			list_del_init(&ent->node);
			free(ent);
			break;
		}
	}

	return 0;
}

static int perf_inject__sched_switch(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct perf_evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);

	ent = malloc(event->header.size + sizeof(struct event_entry));
	if (ent == NULL) {
		color_fprintf(stderr, PERF_COLOR_RED,
			      "Not enough memory to process sched switch event!");
		return -1;
	}

	ent->tid = sample->tid;
	memcpy(&ent->event, event, event->header.size);
	list_add(&ent->node, &inject->samples);
	return 0;
}

static int perf_inject__sched_stat(struct perf_tool *tool,
				   union perf_event *event __maybe_unused,
				   struct perf_sample *sample,
				   struct perf_evsel *evsel,
				   struct machine *machine)
{
	struct event_entry *ent;
	union perf_event *event_sw;
	struct perf_sample sample_sw;
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u32 pid = perf_evsel__intval(evsel, sample, "pid");

	list_for_each_entry(ent, &inject->samples, node) {
		if (pid == ent->tid)
			goto found;
	}

	return 0;
found:
	event_sw = &ent->event[0];
	perf_evsel__parse_sample(evsel, event_sw, &sample_sw);

	sample_sw.period = sample->period;
	sample_sw.time	 = sample->time;
	perf_event__synthesize_sample(event_sw, evsel->attr.sample_type,
				      evsel->attr.read_format, &sample_sw,
				      false);
	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}
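
/*
 * These handlers back the -s/--sched-stat mode; a sketch of how it is
 * meant to be driven (the event list and file names are illustrative):
 *
 *   perf record -e sched:sched_stat_sleep -e sched:sched_switch \
 *               -e sched:sched_process_exit -g -o perf.data.raw -- <workload>
 *   perf inject -s -i perf.data.raw -o perf.data
 *   perf report -i perf.data
 */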

static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}

static int perf_evsel__check_stype(struct perf_evsel *evsel,
				   u64 sample_type, const char *sample_msg)
{
	struct perf_event_attr *attr = &evsel->attr;
	const char *name = perf_evsel__name(evsel);

	if (!(attr->sample_type & sample_type)) {
		pr_err("Samples for %s event do not have %s attribute set.",
		       name, sample_msg);
		return -EINVAL;
	}

	return 0;
}

static int drop_sample(struct perf_tool *tool __maybe_unused,
		       union perf_event *event __maybe_unused,
		       struct perf_sample *sample __maybe_unused,
		       struct perf_evsel *evsel __maybe_unused,
		       struct machine *machine __maybe_unused)
{
	return 0;
}

static void strip_init(struct perf_inject *inject)
{
	struct perf_evlist *evlist = inject->session->evlist;
	struct perf_evsel *evsel;

	inject->tool.context_switch = perf_event__drop;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = drop_sample;
}

static bool has_tracking(struct perf_evsel *evsel)
{
	return evsel->attr.mmap || evsel->attr.mmap2 || evsel->attr.comm ||
	       evsel->attr.task;
}

#define COMPAT_MASK (PERF_SAMPLE_ID | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | \
		     PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER)

/*
 * In order that the perf.data file is parsable, tracking events like MMAP need
 * their selected event to exist, except if there is only 1 selected event left
 * and it has a compatible sample type.
 */
static bool ok_to_remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel_to_remove)
{
	struct perf_evsel *evsel;
	int cnt = 0;
	bool ok = false;

	if (!has_tracking(evsel_to_remove))
		return true;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->handler != drop_sample) {
			cnt += 1;
			if ((evsel->attr.sample_type & COMPAT_MASK) ==
			    (evsel_to_remove->attr.sample_type & COMPAT_MASK))
				ok = true;
		}
	}

	return ok && cnt == 1;
}

static void strip_fini(struct perf_inject *inject)
{
	struct perf_evlist *evlist = inject->session->evlist;
	struct perf_evsel *evsel, *tmp;

	/* Remove non-synthesized evsels if possible */
	evlist__for_each_entry_safe(evlist, tmp, evsel) {
		if (evsel->handler == drop_sample &&
		    ok_to_remove(evlist, evsel)) {
			pr_debug("Deleting %s\n", perf_evsel__name(evsel));
			perf_evlist__remove(evlist, evsel);
			perf_evsel__delete(evsel);
		}
	}
}
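
/*
 * Stripping is only meaningful together with --itrace; a sketch of the
 * intended invocation (file names illustrative):
 *
 *   perf inject --itrace --strip -i perf.data -o perf.data.new
 *
 * which leaves essentially only the synthesized events in the output.
 */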

static int __cmd_inject(struct perf_inject *inject)
{
	int ret = -EINVAL;
	struct perf_session *session = inject->session;
	struct perf_data_file *file_out = &inject->output;
	int fd = perf_data_file__fd(file_out);
	u64 output_data_offset;

	signal(SIGINT, sig_handler);

	if (inject->build_ids || inject->sched_stat ||
	    inject->itrace_synth_opts.set) {
		inject->tool.mmap	  = perf_event__repipe_mmap;
		inject->tool.mmap2	  = perf_event__repipe_mmap2;
		inject->tool.fork	  = perf_event__repipe_fork;
		inject->tool.tracing_data = perf_event__repipe_tracing_data;
	}

	output_data_offset = session->header.data_offset;

	if (inject->build_ids) {
		inject->tool.sample = perf_event__inject_buildid;
	} else if (inject->sched_stat) {
		struct perf_evsel *evsel;

		evlist__for_each_entry(session->evlist, evsel) {
			const char *name = perf_evsel__name(evsel);

			if (!strcmp(name, "sched:sched_switch")) {
				if (perf_evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
					return -EINVAL;

				evsel->handler = perf_inject__sched_switch;
			} else if (!strcmp(name, "sched:sched_process_exit"))
				evsel->handler = perf_inject__sched_process_exit;
			else if (!strncmp(name, "sched:sched_stat_", 17))
				evsel->handler = perf_inject__sched_stat;
		}
	} else if (inject->itrace_synth_opts.set) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		inject->itrace_synth_opts.inject = true;
		inject->tool.comm	    = perf_event__repipe_comm;
		inject->tool.namespaces	    = perf_event__repipe_namespaces;
		inject->tool.exit	    = perf_event__repipe_exit;
		inject->tool.id_index	    = perf_event__repipe_id_index;
		inject->tool.auxtrace_info  = perf_event__process_auxtrace_info;
		inject->tool.auxtrace	    = perf_event__process_auxtrace;
		inject->tool.aux	    = perf_event__drop_aux;
		inject->tool.itrace_start   = perf_event__drop_aux;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
		/* Allow space in the header for new attributes */
		output_data_offset = 4096;

		if (inject->strip)
			strip_init(inject);
	}

	if (!inject->itrace_synth_opts.set)
		auxtrace_index__free(&session->auxtrace_index);

	if (!file_out->is_pipe)
		lseek(fd, output_data_offset, SEEK_SET);

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	if (!file_out->is_pipe) {
		if (inject->build_ids)
			perf_header__set_feat(&session->header,
					      HEADER_BUILD_ID);
		/*
		 * Keep all buildids when there is unprocessed AUX data because
		 * it is not known which ones the AUX trace hits.
		 */
		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
			dsos__hit_all(session);
		/*
		 * The AUX areas have been removed and replaced with
		 * synthesized hardware events, so clear the feature flag and
		 * remove the evsel.
		 */
		if (inject->itrace_synth_opts.set) {
			struct perf_evsel *evsel;

			perf_header__clear_feat(&session->header,
						HEADER_AUXTRACE);
			if (inject->itrace_synth_opts.last_branch)
				perf_header__set_feat(&session->header,
						      HEADER_BRANCH_STACK);
			evsel = perf_evlist__id2evsel_strict(session->evlist,
							     inject->aux_id);
			if (evsel) {
				pr_debug("Deleting %s\n",
					 perf_evsel__name(evsel));
				perf_evlist__remove(session->evlist, evsel);
				perf_evsel__delete(evsel);
			}
			if (inject->strip)
				strip_fini(inject);
		}
		session->header.data_offset = output_data_offset;
		session->header.data_size = inject->bytes_written;
		perf_session__write_header(session, session->evlist, fd, true);
	}

	return ret;
}

int cmd_inject(int argc, const char **argv)
{
	struct perf_inject inject = {
		.tool = {
			.sample		= perf_event__repipe_sample,
			.mmap		= perf_event__repipe,
			.mmap2		= perf_event__repipe,
			.comm		= perf_event__repipe,
			.fork		= perf_event__repipe,
			.exit		= perf_event__repipe,
			.lost		= perf_event__repipe,
			.lost_samples	= perf_event__repipe,
			.aux		= perf_event__repipe,
			.itrace_start	= perf_event__repipe,
			.context_switch	= perf_event__repipe,
			.read		= perf_event__repipe_sample,
			.throttle	= perf_event__repipe,
			.unthrottle	= perf_event__repipe,
			.attr		= perf_event__repipe_attr,
			.tracing_data	= perf_event__repipe_op2_synth,
			.auxtrace_info	= perf_event__repipe_op2_synth,
			.auxtrace	= perf_event__repipe_auxtrace,
			.auxtrace_error	= perf_event__repipe_op2_synth,
			.time_conv	= perf_event__repipe_op2_synth,
			.finished_round	= perf_event__repipe_oe_synth,
			.build_id	= perf_event__repipe_op2_synth,
			.id_index	= perf_event__repipe_op2_synth,
		},
		.input_name  = "-",
		.samples = LIST_HEAD_INIT(inject.samples),
		.output = {
			.path = "-",
			.mode = PERF_DATA_MODE_WRITE,
		},
	};
	struct perf_data_file file = {
		.mode = PERF_DATA_MODE_READ,
	};
	int ret;

	struct option options[] = {
		OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
			    "Inject build-ids into the output stream"),
		OPT_STRING('i', "input", &inject.input_name, "file",
			   "input file name"),
		OPT_STRING('o', "output", &inject.output.path, "file",
			   "output file name"),
		OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
			    "Merge sched-stat and sched-switch for getting events "
			    "where and how long tasks slept"),
#ifdef HAVE_JITDUMP
		OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
#endif
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show build ids, etc)"),
		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
			   "kallsyms pathname"),
		OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
		OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
				    NULL, "opts", "Instruction Tracing options",
				    itrace_parse_synth_opts),
		OPT_BOOLEAN(0, "strip", &inject.strip,
			    "strip non-synthesized events (use with --itrace)"),
		OPT_END()
	};
	const char *const inject_usage[] = {
		"perf inject [<options>]",
		NULL
	};
#ifndef HAVE_JITDUMP
	set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
#endif
	argc = parse_options(argc, argv, options, inject_usage, 0);

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(inject_usage, options);

	if (inject.strip && !inject.itrace_synth_opts.set) {
		pr_err("--strip option requires --itrace option\n");
		return -1;
	}

	if (perf_data_file__open(&inject.output)) {
		perror("failed to create output file");
		return -1;
	}

	inject.tool.ordered_events = inject.sched_stat;

	file.path = inject.input_name;
	inject.session = perf_session__new(&file, true, &inject.tool);
	if (inject.session == NULL)
		return -1;

	if (inject.build_ids) {
		/*
		 * Make sure the mmap records are ordered correctly, which
		 * matters especially for jitted code mmaps. We cannot
		 * generate the buildid hit list and inject the jit mmaps at
		 * the same time for now.
		 */
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
	}
#ifdef HAVE_JITDUMP
	if (inject.jit_mode) {
		inject.tool.mmap2	   = perf_event__jit_repipe_mmap2;
		inject.tool.mmap	   = perf_event__jit_repipe_mmap;
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
		/*
		 * JIT MMAP injection injects all MMAP events in one go, so it
		 * does not obey finished_round semantics.
		 */
		inject.tool.finished_round = perf_event__drop_oe;
	}
#endif
	ret = symbol__init(&inject.session->header.env);
	if (ret < 0)
		goto out_delete;

	ret = __cmd_inject(&inject);

out_delete:
	perf_session__delete(inject.session);
	return ret;
}