/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
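
/*
 * Scratch buffers for assembling trace entries, one per software event
 * recursion context: perf_swevent_get_recursion_context() hands out an
 * index per context level (task, softirq, hardirq, NMI), so nested
 * events never stomp on each other's buffer.
 */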
static char *perf_trace_buf[4];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	struct hlist_head *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;

	/* Everything below is shared state; only the first user sets it up. */
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	/* The scratch buffers are shared by every trace event in use. */
	if (!total_ref_count) {
		char *buf;
		int i;

		for (i = 0; i < 4; i++) {
			buf = (char *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	if (tp_event->class->reg)
		ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
	else
		ret = tracepoint_probe_register(tp_event->name,
						tp_event->class->perf_probe,
						tp_event);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}
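
/*
 * perf_trace_init() below is the entry point used by the perf core
 * when a PERF_TYPE_TRACEPOINT event is created: it looks up the ftrace
 * event whose id matches attr.config, pins the owning module, and
 * hands off to perf_trace_event_init() for the shared setup above.
 */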
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->perf_probe &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
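
/*
 * For reference, user space reaches perf_trace_init() by opening a
 * tracepoint event. A minimal sketch (event_id read from debugfs,
 * e.g. tracing/events/<subsys>/<event>/id):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_TRACEPOINT,
 *		.size	= sizeof(attr),
 *		.config	= event_id,
 *	};
 *	fd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
 */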

int perf_trace_enable(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head *list;

	list = tp_event->perf_events;
	if (WARN_ON_ONCE(!list))
		return -EINVAL;

	list = per_cpu_ptr(list, smp_processor_id());
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return 0;
}
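
/*
 * Note: enable/disable only link and unlink the event on its per-cpu
 * list. hlist_add_head_rcu()/hlist_del_rcu() pair with the RCU-style
 * traversal done by the probes that deliver events, so a just-disabled
 * event may still be seen by a probe until a grace period has elapsed.
 */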

void perf_trace_disable(struct perf_event *p_event)
{
	hlist_del_rcu(&p_event->hlist_entry);
}

void perf_trace_destroy(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		return;

	if (tp_event->class->reg)
		tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);
	else
		tracepoint_probe_unregister(tp_event->name,
					    tp_event->class->perf_probe,
					    tp_event);

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
}

__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = per_cpu_ptr(perf_trace_buf[*rctxp], smp_processor_id());

	/* zero the padding added for alignment, to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
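
/*
 * A typical caller (e.g. a TRACE_EVENT-generated perf probe) pairs the
 * prepare above with perf_trace_buf_submit(); roughly, under the probe
 * layout of this kernel version:
 *
 *	entry = perf_trace_buf_prepare(size, event_type, regs, &rctx);
 *	if (!entry)
 *		return;
 *	... fill the event-specific fields behind the header ...
 *	perf_trace_buf_submit(entry, size, rctx, addr, count, regs, head);
 *
 * where "head" is the per-cpu hlist of attached perf events and the
 * submit side also releases the recursion context acquired here.
 */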