/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include <linux/module.h>
#include "trace.h"

/*
 * alloc_percpu() takes a type rather than a size, so create a dummy
 * type that matches the desired buffer size.
 */
typedef struct { char buf[FTRACE_MAX_PROFILE_SIZE]; } profile_buf_t;

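/*
 * Per-cpu scratch buffers that the profiling probes write event data
 * into. A separate buffer is used from NMI context so that an NMI
 * hitting in the middle of a probe does not corrupt the regular one.
 */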
char *trace_profile_buf;
EXPORT_SYMBOL_GPL(trace_profile_buf);

char *trace_profile_buf_nmi;
EXPORT_SYMBOL_GPL(trace_profile_buf_nmi);

/* Count the events in use (per event id, not per instance) */
static int total_profile_count;

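/*
 * Take a profiling reference on @event. On the first profiled event
 * system wide, allocate the regular and NMI per-cpu buffers, then
 * call the event's profile_enable() callback.
 */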
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
        char *buf;
        int ret = -ENOMEM;

        if (atomic_inc_return(&event->profile_count))
                return 0;

        if (!total_profile_count) {
                buf = (char *)alloc_percpu(profile_buf_t);
                if (!buf)
                        goto fail_buf;

                rcu_assign_pointer(trace_profile_buf, buf);

                buf = (char *)alloc_percpu(profile_buf_t);
                if (!buf)
                        goto fail_buf_nmi;

                rcu_assign_pointer(trace_profile_buf_nmi, buf);
        }

        ret = event->profile_enable(event);
        if (!ret) {
                total_profile_count++;
                return 0;
        }

fail_buf_nmi:
        if (!total_profile_count) {
                free_percpu(trace_profile_buf_nmi);
                free_percpu(trace_profile_buf);
                trace_profile_buf_nmi = NULL;
                trace_profile_buf = NULL;
        }
fail_buf:
        atomic_dec(&event->profile_count);

        return ret;
}

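/*
 * Enable profiling for the event with the given id. The event list is
 * walked under event_mutex and a reference on the owning module is
 * taken so it cannot be unloaded while the event is being profiled.
 */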
int ftrace_profile_enable(int event_id)
{
        struct ftrace_event_call *event;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id && event->profile_enable &&
                    try_module_get(event->mod)) {
                        ret = ftrace_profile_enable_event(event);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}

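/*
 * Drop the profiling reference on @event. When the last profiled event
 * in the system goes away, unpublish the per-cpu buffers, wait for
 * in-flight probes with synchronize_sched(), then free the buffers.
 */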
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
        char *buf, *nmi_buf;

        if (!atomic_add_negative(-1, &event->profile_count))
                return;

        event->profile_disable(event);

        if (!--total_profile_count) {
                buf = trace_profile_buf;
                rcu_assign_pointer(trace_profile_buf, NULL);

                nmi_buf = trace_profile_buf_nmi;
                rcu_assign_pointer(trace_profile_buf_nmi, NULL);

                /*
                 * Ensure every event being profiled has finished before
                 * releasing the buffers
                 */
                synchronize_sched();

                free_percpu(buf);
                free_percpu(nmi_buf);
        }
}

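/*
 * Disable profiling for the event with the given id and release the
 * module reference taken in ftrace_profile_enable().
 */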
void ftrace_profile_disable(int event_id)
{
        struct ftrace_event_call *event;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id) {
                        ftrace_profile_disable_event(event);
                        module_put(event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);
}