// SPDX-License-Identifier: GPL-2.0
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
#include "trace_probe.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;
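
/*
 * Check whether @p_event may attach to @tp_event: run any per-event
 * ->perf_perm() hook and restrict raw sample data and function tracing
 * to sufficiently privileged callers.
 */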
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * We already checked and allowed the creation of the parent,
	 * so allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because code below is called only via the perf_event_open syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for the function trace
		 * event, due to issues with page faults while tracing the page
		 * fault handler and its overall trickiness.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
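
/*
 * Register @p_event against @tp_event. The per-event hlist of active
 * perf events and the global perf_trace_buf buffers are allocated on
 * first use and are reference counted, per event and globally.
 */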
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}
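
/*
 * Undo perf_trace_event_reg(): drop the per-event and global reference
 * counts, free the per-event hlist and the shared buffers once their
 * last user is gone, and put the event's module reference.
 */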
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}
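
/*
 * perf_event_open() entry point for tracepoint events: look up the
 * trace event whose id matches attr.config under event_mutex, pin the
 * module that provides it and initialize the perf side of the event.
 */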
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

#ifdef CONFIG_KPROBE_EVENTS
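/*
 * Create a local, perf-only kprobe trace event from the kprobe_func /
 * kprobe_addr / probe_offset attributes and bind @p_event to it.
 */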
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *func = NULL;
	struct trace_event_call *tp_event;

	if (p_event->attr.kprobe_func) {
		func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
		if (!func)
			return -ENOMEM;
		ret = strncpy_from_user(
			func, u64_to_user_ptr(p_event->attr.kprobe_func),
			KSYM_NAME_LEN);
		if (ret == KSYM_NAME_LEN)
			ret = -E2BIG;
		if (ret < 0)
			goto out;

		if (func[0] == '\0') {
			kfree(func);
			func = NULL;
		}
	}

	tp_event = create_local_trace_kprobe(
		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
		p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_kprobe(tp_event);
out:
	kfree(func);
	return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);

	destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENTS */

#ifdef CONFIG_UPROBE_EVENTS
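/*
 * Create a local, perf-only uprobe trace event from the uprobe_path /
 * probe_offset / ref_ctr_offset attributes and bind @p_event to it.
 */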
int perf_uprobe_init(struct perf_event *p_event,
		     unsigned long ref_ctr_offset, bool is_retprobe)
{
	int ret;
	char *path = NULL;
	struct trace_event_call *tp_event;

	if (!p_event->attr.uprobe_path)
		return -EINVAL;

	path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
			    PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		return (ret == -EINVAL) ? -E2BIG : ret;
	}
	if (path[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	tp_event = create_local_trace_uprobe(path, p_event->attr.probe_offset,
					     ref_ctr_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	/*
	 * A local trace_uprobe needs to hold event_mutex to call
	 * uprobe_buffer_enable() and uprobe_buffer_disable().
	 * event_mutex is not required for local trace_kprobes.
	 */
	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_uprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(path);
	return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);

	destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENTS */
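
/*
 * pmu add/del callbacks for trace events. Unless the event class
 * implements a custom TRACE_REG_PERF_ADD/DEL action, the default is to
 * queue the event on (or remove it from) this CPU's hlist, which the
 * tracepoint probes iterate when they fire.
 */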
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	/*
	 * If TRACE_REG_PERF_ADD returns false; no custom action was performed
	 * and we need to take the default action of enqueueing our event on
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
		struct hlist_head __percpu *pcpu_list;
		struct hlist_head *list;

		pcpu_list = tp_event->perf_events;
		if (WARN_ON_ONCE(!pcpu_list))
			return -EINVAL;

		list = this_cpu_ptr(pcpu_list);
		hlist_add_head_rcu(&p_event->hlist_entry, list);
	}

	return 0;
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	/*
	 * If TRACE_REG_PERF_DEL returns false; no custom action was performed
	 * and we need to take the default action of dequeueing our event from
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
		hlist_del_rcu(&p_event->hlist_entry);
}
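
/*
 * Grab the per-cpu, per-context scratch buffer used to build a raw
 * sample. Returns NULL if @size exceeds PERF_MAX_TRACE_SIZE (with a
 * one-time warning) or if no recursion context is available; on
 * success, *rctxp holds the recursion context to hand back via
 * perf_trace_buf_submit().
 */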
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);
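
/*
 * Fill in the common trace_entry header (type, irq flags, preempt
 * count) for a record built in the perf trace buffer.
 */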
void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;
	int pc = preempt_count();
	unsigned long flags;

	local_save_flags(flags);
	tracing_generic_entry_update(entry, type, flags, pc);
}
NOKPROBE_SYMBOL(perf_trace_buf_update);

#ifdef CONFIG_FUNCTION_TRACER
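/*
 * ftrace callback for the perf function trace event: build an
 * ftrace_entry in the perf trace buffer and submit it for the single
 * event that owns this ftrace_ops, but only on the CPU that event was
 * scheduled on (recorded in ops->private by TRACE_REG_PERF_ADD below).
 */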
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct perf_event *event;
	struct hlist_head head;
	struct pt_regs regs;
	int rctx;

	if ((unsigned long)ops->private != smp_processor_id())
		return;

	event = container_of(ops, struct perf_event, ftrace_ops);

	/*
	 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
	 * the perf code does is hlist_for_each_entry_rcu(), so we can
	 * get away with simply setting the @head.first pointer in order
	 * to create a singular list.
	 */
	head.first = &event->hlist_entry;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, &head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags   = FTRACE_OPS_FL_RCU;
	ops->func    = perf_ftrace_function_call;
	ops->private = (void *)(unsigned long)nr_cpu_ids;

	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}
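
/*
 * TRACE_REG_* dispatch for the function trace event when used via perf:
 * the ftrace_ops is registered/unregistered on perf open/close, while
 * add/del only record which CPU the event is scheduled on so that
 * perf_ftrace_function_call() can filter on it.
 */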
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	struct perf_event *event = data;

	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
		return 1;
	case TRACE_REG_PERF_DEL:
		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
		return 1;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */