/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
# include <linux/module.h>
# include <linux/fs.h>
# include <linux/debugfs.h>
# include <linux/kallsyms.h>
# include <linux/uaccess.h>
# include <linux/marker.h>
# include <linux/ftrace.h>
# include "trace.h"
/* trace_array this tracer writes into; set by sched_switch_trace_init() */
static struct trace_array	*ctx_trace;

/* non-zero while the sched_switch tracer is actively recording entries */
static int __read_mostly	tracer_enabled;

/* reference count of sched-probe users (tracing + cmdline recording) */
static atomic_t			sched_ref;
/*
 * Probe payload for the context-switch marker: records one
 * sched_switch entry into the current CPU's trace buffer.
 *
 * @private: really a struct trace_array ** (&ctx_trace is passed at
 *           marker registration time)
 * @__rq:    runqueue pointer (opaque here, unused)
 * @prev:    task being switched out
 * @next:    task being switched in
 */
static void
sched_switch_func(void *private, void *__rq, struct task_struct *prev,
		  struct task_struct *next)
{
	struct trace_array **ptr = private;
	struct trace_array *tr = *ptr;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/* pid->comm mappings are recorded even while tracing is disabled */
	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled)
		return;

	/* IRQs off so the CPU cannot change under us */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	/* recursion protection: write only when we are the sole user */
	if (likely(disabled == 1))
		tracing_sched_switch_trace(tr, data, prev, next, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
/*
 * Marker callback wired to "kernel_sched_schedule".  Unpacks the
 * runqueue and task pointers from the vararg list (which matches the
 * registered format string) and forwards them to sched_switch_func().
 */
static notrace void
sched_switch_callback(void *probe_data, void *call_data,
		      const char *format, va_list *args)
{
	struct rq *rq;
	struct task_struct *switched_out;
	struct task_struct *switched_in;

	/* nobody holds a sched reference: nothing to record */
	if (!atomic_read(&sched_ref))
		return;

	/* skip prev_pid %d next_pid %d prev_state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);

	/* then comes "rq %p prev %p next %p" */
	rq = va_arg(*args, typeof(rq));
	switched_out = va_arg(*args, typeof(switched_out));
	switched_in = va_arg(*args, typeof(switched_in));

	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	sched_switch_func(probe_data, rq, switched_out, switched_in);
}
/*
 * Probe payload for the wakeup markers: records one sched_wakeup
 * entry into the current CPU's trace buffer.
 *
 * @private: really a struct trace_array ** (&ctx_trace)
 * @__rq:    runqueue pointer (opaque here, unused)
 * @wakee:   task being woken up
 * @curr:    task currently running on the wakee's runqueue
 */
static void
wakeup_func(void *private, void *__rq, struct task_struct *wakee,
	    struct task_struct *curr)
{
	struct trace_array **ptr = private;
	struct trace_array *tr = *ptr;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	tracing_record_cmdline(curr);

	/* IRQs off so the CPU cannot change under us */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	/* recursion protection: write only when we are the sole user */
	if (likely(disabled == 1))
		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
/*
 * Marker callback wired to "kernel_sched_wakeup" and
 * "kernel_sched_wakeup_new".  Unpacks the vararg list per the
 * registered format string and forwards to wakeup_func().
 */
static notrace void
wake_up_callback(void *probe_data, void *call_data,
		 const char *format, va_list *args)
{
	struct rq *rq;
	struct task_struct *woken;
	struct task_struct *running;

	if (likely(!tracer_enabled))
		return;

	/* Skip pid %d state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);

	/* now get the meat: "rq %p task %p rq->curr %p" */
	rq = va_arg(*args, typeof(rq));
	woken = va_arg(*args, typeof(woken));
	running = va_arg(*args, typeof(running));

	tracing_record_cmdline(woken);
	tracing_record_cmdline(running);

	wakeup_func(probe_data, rq, woken, running);
}
/*
 * Prepare for a fresh trace: restart the time base and clear every
 * online CPU's per-cpu trace buffer.
 */
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}
/*
 * Attach the three marker probes that feed this tracer.  On partial
 * failure the already-registered probes are unwound in reverse so no
 * stale probe is left behind.
 *
 * Returns 0 on success or the marker_probe_register() error code.
 */
static int tracing_sched_register(void)
{
	int ret;

	ret = marker_probe_register("kernel_sched_wakeup",
			"pid %d state %ld ## rq %p task %p rq->curr %p",
			wake_up_callback,
			&ctx_trace);
	if (ret) {
		pr_info("wakeup trace: Couldn't add marker"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = marker_probe_register("kernel_sched_wakeup_new",
			"pid %d state %ld ## rq %p task %p rq->curr %p",
			wake_up_callback,
			&ctx_trace);
	if (ret) {
		pr_info("wakeup trace: Couldn't add marker"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = marker_probe_register("kernel_sched_schedule",
			"prev_pid %d next_pid %d prev_state %ld "
			"## rq %p prev %p next %p",
			sched_switch_callback,
			&ctx_trace);
	if (ret) {
		pr_info("sched trace: Couldn't add marker"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	marker_probe_unregister("kernel_sched_wakeup_new",
				wake_up_callback,
				&ctx_trace);
fail_deprobe:
	marker_probe_unregister("kernel_sched_wakeup",
				wake_up_callback,
				&ctx_trace);
	return ret;
}
/*
 * Detach all three marker probes, in reverse order of registration.
 */
static void tracing_sched_unregister(void)
{
	marker_probe_unregister("kernel_sched_schedule",
				sched_switch_callback,
				&ctx_trace);
	marker_probe_unregister("kernel_sched_wakeup_new",
				wake_up_callback,
				&ctx_trace);
	marker_probe_unregister("kernel_sched_wakeup",
				wake_up_callback,
				&ctx_trace);
}
static void tracing_start_sched_switch ( void )
2008-05-12 21:21:10 +02:00
{
long ref ;
ref = atomic_inc_return ( & sched_ref ) ;
if ( ref = = 1 )
tracing_sched_register ( ) ;
}
2008-05-22 10:37:48 +02:00
static void tracing_stop_sched_switch ( void )
2008-05-12 21:21:10 +02:00
{
long ref ;
ref = atomic_dec_and_test ( & sched_ref ) ;
if ( ref )
tracing_sched_unregister ( ) ;
}
2008-05-22 11:49:22 -04:00
void tracing_start_cmdline_record ( void )
{
tracing_start_sched_switch ( ) ;
}
/*
 * Public hook: release the reference taken by
 * tracing_start_cmdline_record().
 */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
/*
 * Begin a sched_switch trace: clear the buffers and take the cmdline
 * reference first, and only then let the probe callbacks record.
 */
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracing_start_cmdline_record();
	/* enable last, after buffers and probes are ready */
	tracer_enabled = 1;
}
/*
 * Stop a sched_switch trace: disable recording first, then drop the
 * cmdline-record reference (mirror image of start_sched_trace()).
 */
static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
	tracing_stop_cmdline_record();
}
/*
 * Tracer init callback: remember the trace array and start tracing
 * right away if the tracer is already switched on.
 */
static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}
/*
 * Tracer reset callback: stop tracing if it is currently running.
 */
static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}
static void sched_switch_trace_ctrl_update ( struct trace_array * tr )
{
/* When starting a new trace, reset the buffers */
if ( tr - > ctrl )
start_sched_trace ( tr ) ;
else
stop_sched_trace ( tr ) ;
}
/* The sched_switch tracer as registered with the ftrace core. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};
__init static int init_sched_switch_trace ( void )
{
2008-05-12 21:21:10 +02:00
int ret = 0 ;
if ( atomic_read ( & sched_ref ) )
ret = tracing_sched_register ( ) ;
if ( ret ) {
pr_info ( " error registering scheduler trace \n " ) ;
return ret ;
}
2008-05-12 21:20:42 +02:00
return register_tracer ( & sched_switch_trace ) ;
}
device_initcall ( init_sched_switch_trace ) ;