/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include "trace.h"

unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly	tracing_thresh;

static int tracing_disabled = 1;

static long notrace
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
static const int	time_sync_freq_max = 128;
static const cycle_t	time_sync_thresh = 100000;

static DEFINE_PER_CPU(cycle_t, time_offset);
static DEFINE_PER_CPU(cycle_t, prev_cpu_time);
static DEFINE_PER_CPU(int, time_sync_count);
static DEFINE_PER_CPU(int, time_sync_freq);

/*
 * Global lock which we take every now and then to synchronize
 * the CPUs time. This method is not warp-safe, but it's good
 * enough to synchronize slowly diverging time sources and thus
 * it's good enough for tracing:
 */
static DEFINE_SPINLOCK(time_sync_lock);
static cycle_t prev_global_time;

static notrace cycle_t __ftrace_now_sync(cycles_t time, int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&time_sync_lock, flags);

	/*
	 * Update the synchronization frequency:
	 */
	if (per_cpu(time_sync_freq, cpu) < time_sync_freq_max)
		per_cpu(time_sync_freq, cpu) *= 2;
	per_cpu(time_sync_count, cpu) = per_cpu(time_sync_freq, cpu);

	if (time < prev_global_time) {
		per_cpu(time_offset, cpu) += prev_global_time - time;
		time = prev_global_time;
	} else {
		prev_global_time = time;
	}

	spin_unlock_irqrestore(&time_sync_lock, flags);

	return time;
}
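
/*
 * ftrace_now() is the time source for trace entries: sched_clock()
 * plus a per-CPU offset. The common case is lock-free; we only drop
 * into the locked slow path (__ftrace_now_sync) when the new reading
 * diverged by more than time_sync_thresh from the previous one, or
 * when the per-CPU sync countdown ran out. Each sync doubles the
 * countdown interval (up to time_sync_freq_max), so stable clocks get
 * resynchronized progressively less often.
 */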
notrace cycle_t ftrace_now(int cpu)
{
	cycle_t prev_cpu_time, time, delta_time;

	prev_cpu_time = per_cpu(prev_cpu_time, cpu);
	time = sched_clock() + per_cpu(time_offset, cpu);
	delta_time = time - prev_cpu_time;

	if (unlikely(delta_time > time_sync_thresh ||
			--per_cpu(time_sync_count, cpu) <= 0))
		time = __ftrace_now_sync(time, cpu);

	return time;
}
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

static int			tracer_enabled;
static unsigned long		trace_nr_entries = 16384UL;

static struct tracer		*trace_types __read_mostly;
static struct tracer		*current_trace __read_mostly;
static int			max_tracer_type_len;

static DEFINE_MUTEX(trace_types_lock);

#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))

static int __init set_nr_entries(char *str)
{
	if (!str)
		return 0;
	trace_nr_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("trace_entries=", set_nr_entries);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_SPECIAL,

	__TRACE_LAST_TYPE
};

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_NEED_RESCHED		= 0x02,
	TRACE_FLAG_HARDIRQ		= 0x04,
	TRACE_FLAG_SOFTIRQ		= 0x08,
};

enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_BIN			= 0x20,
};

#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions above */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"bin",
	NULL
};

static unsigned trace_flags;
static DEFINE_SPINLOCK(ftrace_max_lock);

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static notrace void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;
	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = tsk->uid;
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
}
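
/*
 * Each per-CPU trace buffer is a circular list of pages (linked
 * through page->lru). trace_head/trace_tail point at the pages being
 * written to and read from, and trace_head_idx/trace_tail_idx index
 * the entries inside those pages. check_pages() sanity-checks the
 * list linkage; head_page() returns the virtual address of the first
 * data page.
 */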
void check_pages(struct trace_array_cpu *data)
{
	struct page *page, *tmp;

	BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
	BUG_ON(data->trace_pages.prev->next != &data->trace_pages);

	list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
		BUG_ON(page->lru.next->prev != &page->lru);
		BUG_ON(page->lru.prev->next != &page->lru);
	}
}

void *head_page(struct trace_array_cpu *data)
{
	struct page *page;

	check_pages(data);
	if (list_empty(&data->trace_pages))
		return NULL;

	page = list_entry(data->trace_pages.next, struct page, lru);
	BUG_ON(&page->lru == &data->trace_pages);

	return page_address(page);
}
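
/*
 * trace_seq is a one-page staging buffer used to format a single
 * trace line before it is handed to seq_file or copied to userspace.
 * The helpers below are all-or-nothing: if the formatted text does
 * not fit into the remaining space they write nothing and return 0,
 * so callers can retry the whole line after the buffer is flushed.
 */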
static notrace int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (!len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret > len)
		return 0;

	s->len += ret;

	return len;
}

static notrace int
trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

static notrace int
trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->len >= (PAGE_SIZE - 1))
		return 0;

	s->buffer[s->len++] = c;

	return 1;
}

static notrace int
trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
{
	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

static notrace void
trace_seq_reset(struct trace_seq *s)
{
	s->len = 0;
}

static notrace void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

	s->buffer[len] = 0;
	seq_puts(m, s->buffer);

	trace_seq_reset(s);
}

notrace static void
flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
{
	struct list_head flip_pages;

	INIT_LIST_HEAD(&flip_pages);

	memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
		sizeof(struct trace_array_cpu) -
		offsetof(struct trace_array_cpu, trace_head_idx));

	check_pages(tr1);
	check_pages(tr2);
	list_splice_init(&tr1->trace_pages, &flip_pages);
	list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
	list_splice_init(&flip_pages, &tr2->trace_pages);
	BUG_ON(!list_empty(&flip_pages));
	check_pages(tr1);
	check_pages(tr2);
}
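
/*
 * The "max" snapshot is taken by swapping page lists rather than by
 * copying entries: flip_trace() exchanges the page lists of the live
 * CPU buffer and the corresponding max_tr buffer, and copies the
 * bookkeeping fields (trace_head_idx onwards) into the max buffer.
 * The live buffer is then reset by the callers below, so recording a
 * new worst-case latency never has to copy the trace entries
 * themselves.
 */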
notrace void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data;
	int i;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	/* clear out all the previous traces */
	for_each_possible_cpu(i) {
		data = tr->data[i];
		flip_trace(max_tr.data[i], data);
		tracing_reset(data);
	}

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy
 */
notrace void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	int i;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(i)
		tracing_reset(max_tr.data[i]);

	flip_trace(max_tr.data[cpu], data);
	tracing_reset(data);

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
}
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array_cpu *data;
		struct trace_array *tr = &global_trace;
		int saved_ctrl = tr->ctrl;
		int i;
		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		for_each_possible_cpu(i) {
			data = tr->data[i];
			if (!head_page(data))
				continue;
			tracing_reset(data);
		}
		current_trace = type;
		tr->ctrl = 0;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		tr->ctrl = saved_ctrl;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		for_each_possible_cpu(i) {
			data = tr->data[i];
			if (!head_page(data))
				continue;
			tracing_reset(data);
		}
		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;

 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
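
/*
 * A tracer plugin only has to provide a name to be registerable; the
 * callbacks (init, reset, open, selftest, ...) are all optional. A
 * hypothetical minimal registration, shown here only as a sketch,
 * would look like:
 *
 *	static struct tracer nop_tracer __read_mostly = {
 *		.name	= "nop",
 *	};
 *
 *	register_tracer(&nop_tracer);
 *
 * Once registered, the tracer shows up in
 * /debugfs/tracing/available_tracers and can be selected by writing
 * its name to /debugfs/tracing/current_tracer.
 */
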
void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;

	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
 out:
	mutex_unlock(&trace_types_lock);
}

notrace void tracing_reset(struct trace_array_cpu *data)
{
	data->trace_idx = 0;
	data->trace_head = data->trace_tail = head_page(data);
	data->trace_head_idx = 0;
	data->trace_tail_idx = 0;
}

#ifdef CONFIG_FTRACE
static notrace void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (unlikely(!tracer_enabled))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	/* only record if we are not nested on this CPU */
	if (likely(disabled == 1))
		ftrace(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};
#endif

notrace void tracing_start_function_trace(void)
{
	register_ftrace_function(&trace_ops);
}

notrace void tracing_stop_function_trace(void)
{
	unregister_ftrace_function(&trace_ops);
}
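
/*
 * Rather than storing the full command line of every task that has
 * ever been traced, we keep a small cache of SAVED_CMDLINES recent
 * comms, with map_pid_to_cmdline and map_cmdline_to_pid translating
 * between pids and cache slots. When a slot is recycled, the evicted
 * pid's mapping is invalidated so it falls back to "<...>" on output.
 */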
#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
atomic_t trace_record_cmdline_disabled;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

notrace void trace_stop_cmdline_recording(void);

static notrace void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned map;
	unsigned idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
}

static notrace char *trace_find_cmdline(int pid)
{
	char *cmdline = "<...>";
	unsigned map;

	if (!pid)
		return "<idle>";

	if (pid > PID_MAX_DEFAULT)
		goto out;

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)
		goto out;

	cmdline = saved_cmdlines[map];

 out:
	return cmdline;
}

notrace void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled))
		return;

	trace_save_cmdline(tsk);
}
static inline notrace struct list_head *
trace_next_list(struct trace_array_cpu *data, struct list_head *next)
{
	/*
	 * Round-robin - but skip the head (which is not a real page):
	 */
	next = next->next;
	if (unlikely(next == &data->trace_pages))
		next = next->next;
	BUG_ON(next == &data->trace_pages);

	return next;
}

static inline notrace void *
trace_next_page(struct trace_array_cpu *data, void *addr)
{
	struct list_head *next;
	struct page *page;

	page = virt_to_page(addr);

	next = trace_next_list(data, &page->lru);
	page = list_entry(next, struct page, lru);

	return page_address(page);
}
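
/*
 * Writing an entry advances trace_head/trace_head_idx; if the head
 * catches up with the tail, the oldest entry is overwritten and the
 * tail is pushed forward, i.e. the buffer behaves as a classic
 * overwriting ring buffer. Callers must hold data->lock (or otherwise
 * have exclusive access) while reserving and filling an entry.
 */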
static inline notrace struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
{
	unsigned long idx, idx_next;
	struct trace_entry *entry;

	data->trace_idx++;
	idx = data->trace_head_idx;
	idx_next = idx + 1;

	BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);

	entry = data->trace_head + idx * TRACE_ENTRY_SIZE;

	if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
		data->trace_head = trace_next_page(data, data->trace_head);
		idx_next = 0;
	}

	if (data->trace_head == data->trace_tail &&
	    idx_next == data->trace_tail_idx) {
		/* overrun */
		data->trace_tail_idx++;
		if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
			data->trace_tail =
				trace_next_page(data, data->trace_tail);
			data->trace_tail_idx = 0;
		}
	}

	data->trace_head_idx = idx_next;

	return entry;
}

static inline notrace void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
{
	struct task_struct *tsk = current;
	unsigned long pc;

	pc = preempt_count();

	entry->preempt_count	= pc & 0xff;
	entry->pid		= tsk->pid;
	entry->t		= ftrace_now(raw_smp_processor_id());
	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

notrace void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
	struct trace_entry *entry;
	unsigned long irq_flags;

	spin_lock_irqsave(&data->lock, irq_flags);
	entry			= tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type		= TRACE_FN;
	entry->fn.ip		= ip;
	entry->fn.parent_ip	= parent_ip;
	spin_unlock_irqrestore(&data->lock, irq_flags);
}
notrace void
trace_special(struct trace_array *tr, struct trace_array_cpu *data,
	      unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_entry *entry;
	unsigned long irq_flags;

	spin_lock_irqsave(&data->lock, irq_flags);
	entry			= tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, 0);
	entry->type		= TRACE_SPECIAL;
	entry->special.arg1	= arg1;
	entry->special.arg2	= arg2;
	entry->special.arg3	= arg3;
	spin_unlock_irqrestore(&data->lock, irq_flags);
}

notrace void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev, struct task_struct *next,
			   unsigned long flags)
{
	struct trace_entry *entry;
	unsigned long irq_flags;

	spin_lock_irqsave(&data->lock, irq_flags);
	entry			= tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type		= TRACE_CTX;
	entry->ctx.prev_pid	= prev->pid;
	entry->ctx.prev_prio	= prev->prio;
	entry->ctx.prev_state	= prev->state;
	entry->ctx.next_pid	= next->pid;
	entry->ctx.next_prio	= next->prio;
	spin_unlock_irqrestore(&data->lock, irq_flags);
}
enum trace_file_type {
	TRACE_FILE_LAT_FMT	= 1,
};

static struct trace_entry *
trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
		struct trace_iterator *iter, int cpu)
{
	struct page *page;
	struct trace_entry *array;

	if (iter->next_idx[cpu] >= tr->entries ||
	    iter->next_idx[cpu] >= data->trace_idx ||
	    (data->trace_head == data->trace_tail &&
	     data->trace_head_idx == data->trace_tail_idx))
		return NULL;

	if (!iter->next_page[cpu]) {
		/* Initialize the iterator for this cpu trace buffer */
		WARN_ON(!data->trace_tail);
		page = virt_to_page(data->trace_tail);
		iter->next_page[cpu] = &page->lru;
		iter->next_page_idx[cpu] = data->trace_tail_idx;
	}

	page = list_entry(iter->next_page[cpu], struct page, lru);
	BUG_ON(&data->trace_pages == &page->lru);

	array = page_address(page);

	/* Still possible to catch up to the tail */
	if (iter->next_idx[cpu] && array == data->trace_tail &&
	    iter->next_page_idx[cpu] == data->trace_tail_idx)
		return NULL;

	WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);

	return &array[iter->next_page_idx[cpu]];
}
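
/*
 * The per-CPU buffers are merged on output: find_next_entry() peeks
 * at the next pending entry of every CPU and picks the one with the
 * smallest timestamp, so the combined stream comes out in (roughly)
 * global time order even though each CPU recorded independently.
 */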
static notrace struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
	struct trace_array *tr = iter->tr;
	struct trace_entry *ent, *next = NULL;
	int next_cpu = -1;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;
		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ent->t < next->t)) {
			next = ent;
			next_cpu = cpu;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	return next;
}

static notrace void trace_iterator_increment(struct trace_iterator *iter)
{
	iter->idx++;
	iter->next_idx[iter->cpu]++;
	iter->next_page_idx[iter->cpu]++;

	if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
		struct trace_array_cpu *data = iter->tr->data[iter->cpu];

		iter->next_page_idx[iter->cpu] = 0;
		iter->next_page[iter->cpu] =
			trace_next_list(data, iter->next_page[iter->cpu]);
	}
}

static notrace void trace_consume(struct trace_iterator *iter)
{
	struct trace_array_cpu *data = iter->tr->data[iter->cpu];

	data->trace_tail_idx++;
	if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
		data->trace_tail = trace_next_page(data, data->trace_tail);
		data->trace_tail_idx = 0;
	}

	/* Check if we empty it, then reset the index */
	if (data->trace_head == data->trace_tail &&
	    data->trace_head_idx == data->trace_tail_idx)
		data->trace_idx = 0;
}

static notrace void *find_next_entry_inc(struct trace_iterator *iter)
{
	struct trace_entry *next;
	int next_cpu = -1;

	next = find_next_entry(iter, &next_cpu);

	iter->prev_ent = iter->ent;
	iter->prev_cpu = iter->cpu;

	iter->ent = next;
	iter->cpu = next_cpu;

	if (next)
		trace_iterator_increment(iter);

	return next ? iter : NULL;
}

static notrace void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *last_ent = iter->ent;
	int i = (int)*pos;
	void *ent;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	if (last_ent && !ent)
		seq_puts(m, "\n\nvim:ft=help\n");

	return ent;
}
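
/*
 * s_start()/s_next()/s_stop() implement the seq_file iterator for the
 * trace files. seq_file may restart the walk at an arbitrary *pos, so
 * s_start() rewinds the iterator and replays entries whenever *pos
 * does not match the position we stopped at last time.
 */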
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = 0;
	int i;

	mutex_lock(&trace_types_lock);

	if (!current_trace || current_trace != iter->trace)
		return NULL;

	atomic_inc(&trace_record_cmdline_disabled);

	/* let the tracer grab locks here if needed */
	if (current_trace->start)
		current_trace->start(iter);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;
		iter->prev_ent = NULL;
		iter->prev_cpu = -1;

		for_each_possible_cpu(i) {
			iter->next_idx[i] = 0;
			iter->next_page[i] = NULL;
		}

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

	atomic_dec(&trace_record_cmdline_disabled);

	/* let the tracer release locks here if needed */
	if (current_trace && current_trace == iter->trace && iter->trace->stop)
		iter->trace->stop(iter);

	mutex_unlock(&trace_types_lock);
}

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	return trace_seq_printf(s, fmt, str);
#endif
	return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, address);
	return trace_seq_printf(s, fmt, str);
#endif
	return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static notrace int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}
static notrace void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /                      \n");
	seq_puts(m, "#                |||||     delay             \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||   \\   |   /           \n");
}

static notrace void print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |      |          |         |\n");
}
static notrace void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long total = 0;
	unsigned long entries = 0;
	int cpu;
	const char *name = "preemption";

	if (type)
		name = type->name;

	for_each_possible_cpu(cpu) {
		if (head_page(tr->data[cpu])) {
			total += tr->data[cpu]->trace_idx;
			if (tr->data[cpu]->trace_idx > tr->entries)
				entries += tr->entries;
			else
				entries += tr->data[cpu]->trace_idx;
		}
	}

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT_DESKTOP)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n");
}

static notrace void
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char *comm;

	comm = trace_find_cmdline(entry->pid);

	trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
	trace_seq_printf(s, "%d", cpu);
	trace_seq_printf(s, "%c%c",
			(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
			((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq)
		trace_seq_putc(s, 'H');
	else {
		if (hardirq)
			trace_seq_putc(s, 'h');
		else {
			if (softirq)
				trace_seq_putc(s, 's');
			else
				trace_seq_putc(s, '.');
		}
	}

	if (entry->preempt_count)
		trace_seq_printf(s, "%x", entry->preempt_count);
	else
		trace_seq_puts(s, ".");
}
unsigned long preempt_mark_thresh = 100;

static notrace void
lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
		    unsigned long rel_usecs)
{
	trace_seq_printf(s, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		trace_seq_puts(s, "!: ");
	else if (rel_usecs > 1)
		trace_seq_puts(s, "+: ");
	else
		trace_seq_puts(s, " : ");
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static notrace int
print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *next_entry = find_next_entry(iter, NULL);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	struct trace_entry *entry = iter->ent;
	unsigned long abs_usecs;
	unsigned long rel_usecs;
	char *comm;
	int S;

	if (!next_entry)
		next_entry = entry;
	rel_usecs = ns2usecs(next_entry->t - entry->t);
	abs_usecs = ns2usecs(entry->t - iter->tr->time_start);

	if (verbose) {
		comm = trace_find_cmdline(entry->pid);
		trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]"
				 " %ld.%03ldms (+%ld.%03ldms): ",
				 comm,
				 entry->pid, cpu, entry->flags,
				 entry->preempt_count, trace_idx,
				 ns2usecs(entry->t),
				 abs_usecs/1000,
				 abs_usecs % 1000, rel_usecs/1000,
				 rel_usecs % 1000);
	} else {
		lat_print_generic(s, entry, cpu);
		lat_print_timestamp(s, abs_usecs, rel_usecs);
	}
	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(s, entry->fn.ip, sym_flags);
		trace_seq_puts(s, " (");
		seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
		trace_seq_puts(s, ")\n");
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		comm = trace_find_cmdline(entry->ctx.next_pid);
		trace_seq_printf(s, " %d:%d:%c --> %d:%d %s\n",
				 entry->ctx.prev_pid,
				 entry->ctx.prev_prio,
				 S,
				 entry->ctx.next_pid,
				 entry->ctx.next_prio,
				 comm);
		break;
	case TRACE_SPECIAL:
		trace_seq_printf(s, " %lx %lx %lx\n",
				 entry->special.arg1,
				 entry->special.arg2,
				 entry->special.arg3);
		break;
	default:
		trace_seq_printf(s, "Unknown type %d\n", entry->type);
	}
	return 1;
}
static notrace int print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	unsigned long usec_rem;
	unsigned long long t;
	unsigned long secs;
	char *comm;
	int S;
	int ret;

	entry = iter->ent;

	comm = trace_find_cmdline(iter->ent->pid);

	t = ns2usecs(entry->t);
	usec_rem = do_div(t, 1000000ULL);
	secs = (unsigned long)t;

	ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
	if (!ret)
		return 0;
	ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
	if (!ret)
		return 0;
	ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
	if (!ret)
		return 0;

	switch (entry->type) {
	case TRACE_FN:
		ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags);
		if (!ret)
			return 0;
		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
						entry->fn.parent_ip) {
			ret = trace_seq_printf(s, " <-");
			if (!ret)
				return 0;
			ret = seq_print_ip_sym(s, entry->fn.parent_ip,
					       sym_flags);
			if (!ret)
				return 0;
		}
		ret = trace_seq_printf(s, "\n");
		if (!ret)
			return 0;
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		ret = trace_seq_printf(s, " %d:%d:%c ==> %d:%d\n",
				       entry->ctx.prev_pid,
				       entry->ctx.prev_prio,
				       S,
				       entry->ctx.next_pid,
				       entry->ctx.next_prio);
		if (!ret)
			return 0;
		break;
	case TRACE_SPECIAL:
		ret = trace_seq_printf(s, " %lx %lx %lx\n",
				       entry->special.arg1,
				       entry->special.arg2,
				       entry->special.arg3);
		if (!ret)
			return 0;
		break;
	}
	return 1;
}
static notrace int print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	int ret;
	int S;

	entry = iter->ent;

	ret = trace_seq_printf(s, "%d %d %llu ",
			       entry->pid, iter->cpu, entry->t);
	if (!ret)
		return 0;

	switch (entry->type) {
	case TRACE_FN:
		ret = trace_seq_printf(s, "%x %x\n",
				       entry->fn.ip, entry->fn.parent_ip);
		if (!ret)
			return 0;
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		ret = trace_seq_printf(s, "%d %d %c %d %d\n",
				       entry->ctx.prev_pid,
				       entry->ctx.prev_prio,
				       S,
				       entry->ctx.next_pid,
				       entry->ctx.next_prio);
		if (!ret)
			return 0;
		break;
	case TRACE_SPECIAL:
		ret = trace_seq_printf(s, " %lx %lx %lx\n",
				       entry->special.arg1,
				       entry->special.arg2,
				       entry->special.arg3);
		if (!ret)
			return 0;
		break;
	}
	return 1;
}

#define SEQ_PUT_FIELD_RET(s, x)				\
do {							\
	if (!trace_seq_putmem(s, &(x), sizeof(x)))	\
		return 0;				\
} while (0)

static notrace int print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;

	entry = iter->ent;

	SEQ_PUT_FIELD_RET(s, entry->pid);
	SEQ_PUT_FIELD_RET(s, entry->cpu);
	SEQ_PUT_FIELD_RET(s, entry->t);

	switch (entry->type) {
	case TRACE_FN:
		SEQ_PUT_FIELD_RET(s, entry->fn.ip);
		SEQ_PUT_FIELD_RET(s, entry->fn.parent_ip);
		break;
	case TRACE_CTX:
		SEQ_PUT_FIELD_RET(s, entry->ctx.prev_pid);
		SEQ_PUT_FIELD_RET(s, entry->ctx.prev_prio);
		SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state);
		SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid);
		SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio);
		break;
	case TRACE_SPECIAL:
		SEQ_PUT_FIELD_RET(s, entry->special.arg1);
		SEQ_PUT_FIELD_RET(s, entry->special.arg2);
		SEQ_PUT_FIELD_RET(s, entry->special.arg3);
		break;
	}
	return 1;
}
static int trace_empty(struct trace_iterator *iter)
{
	struct trace_array_cpu *data;
	int cpu;

	for_each_possible_cpu(cpu) {
		data = iter->tr->data[cpu];

		if (head_page(data) && data->trace_idx &&
		    (data->trace_tail != data->trace_head ||
		     data->trace_tail_idx != data->trace_head_idx))
			return 0;
	}
	return 1;
}

static int print_trace_line(struct trace_iterator *iter)
{
	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		return print_lat_fmt(iter, iter->idx, iter->cpu);

	return print_trace_fmt(iter);
}
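
/*
 * Output format precedence: "bin" beats "raw", which beats the
 * latency format; the default human-readable format is only used
 * when neither iter_ctrl option is set and the file was not opened
 * as latency_trace.
 */
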
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		print_trace_line(iter);
		trace_print_seq(m, &iter->seq);
	}

	return 0;
}

static struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator notrace *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
	struct trace_iterator *iter;

	if (tracing_disabled) {
		*ret = -ENODEV;
		return NULL;
	}

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		*ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&trace_types_lock);
	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = inode->i_private;
	iter->trace = current_trace;
	iter->pos = -1;

	/* TODO stop tracer */
	*ret = seq_open(file, &tracer_seq_ops);
	if (!*ret) {
		struct seq_file *m = file->private_data;
		m->private = iter;

		/* stop the trace while dumping */
		if (iter->tr->ctrl)
			tracer_enabled = 0;

		if (iter->trace && iter->trace->open)
			iter->trace->open(iter);
	} else {
		kfree(iter);
		iter = NULL;
	}
	mutex_unlock(&trace_types_lock);

 out:
	return iter;
}
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter = m->private;

	mutex_lock(&trace_types_lock);
	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	if (iter->tr->ctrl)
		tracer_enabled = 1;
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	kfree(iter);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	int ret;

	__tracing_open(inode, file, &ret);

	return ret;
}

static int tracing_lt_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret;

	iter = __tracing_open(inode, file, &ret);

	if (!ret)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	return ret;
}

static notrace void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = m->private;

	(*pos)++;

	if (t)
		t = t->next;

	m->private = t;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t = m->private;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
static int show_traces_open(struct inode *inode, struct file *file)
{
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = trace_types;
	}

	return ret;
}

static struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};

static struct file_operations tracing_lt_fops = {
	.open		= tracing_lt_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};

static struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
};
static ssize_t
tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char *buf;
	int r = 0;
	int len = 0;
	int i;

	/* calculate max size */
	for (i = 0; trace_options[i]; i++) {
		len += strlen(trace_options[i]);
		len += 3; /* "no" and space */
	}

	/* +2 for \n and \0 */
	buf = kmalloc(len + 2, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			r += sprintf(buf + r, "%s ", trace_options[i]);
		else
			r += sprintf(buf + r, "no%s ", trace_options[i]);
	}

	r += sprintf(buf + r, "\n");
	WARN_ON(r >= len + 2);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    buf, r);

	kfree(buf);

	return r;
}
static ssize_t
tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp = buf;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	for (i = 0; trace_options[i]; i++) {
		int len = strlen(trace_options[i]);

		if (strncmp(cmp, trace_options[i], len) == 0) {
			if (neg)
				trace_flags &= ~(1 << i);
			else
				trace_flags |= (1 << i);
			break;
		}
	}

	filp->f_pos += cnt;

	return cnt;
}
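
/*
 * Options are toggled by writing the option name, optionally prefixed
 * with "no", to iter_ctrl. For example:
 *
 *	echo sym-offset > /debug/tracing/iter_ctrl
 *	echo nosym-offset > /debug/tracing/iter_ctrl
 */
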
static struct file_operations tracing_iter_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_iter_ctrl_read,
	.write		= tracing_iter_ctrl_write,
};

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# mkdir /debug\n"
	"# mount -t debugfs nodev /debug\n\n"
	"# cat /debug/tracing/available_tracers\n"
	"wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
	"# cat /debug/tracing/current_tracer\n"
	"none\n"
	"# echo sched_switch > /debug/tracing/current_tracer\n"
	"# cat /debug/tracing/current_tracer\n"
	"sched_switch\n"
	"# cat /debug/tracing/iter_ctrl\n"
	"noprint-parent nosym-offset nosym-addr noverbose\n"
	"# echo print-parent > /debug/tracing/iter_ctrl\n"
	"# echo 1 > /debug/tracing/tracing_enabled\n"
	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
	"# echo 0 > /debug/tracing/tracing_enabled\n"
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
};
static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", tr->ctrl);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	val = !!val;

	mutex_lock(&trace_types_lock);
	if (tr->ctrl ^ val) {
		if (val)
			tracer_enabled = 1;
		else
			tracer_enabled = 0;

		tr->ctrl = val;

		if (current_trace && current_trace->ctrl_update)
			current_trace->ctrl_update(tr);
	}
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[max_tracer_type_len+2];
	int r;

	mutex_lock(&trace_types_lock);
	if (current_trace)
		r = sprintf(buf, "%s\n", current_trace->name);
	else
		r = sprintf(buf, "\n");
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	struct tracer *t;
	char buf[max_tracer_type_len+1];
	int i;

	if (cnt > max_tracer_type_len)
		cnt = max_tracer_type_len;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t || t == current_trace)
		goto out;

	if (current_trace && current_trace->reset)
		current_trace->reset(tr);

	current_trace = t;
	if (t->init)
		t->init(tr);

 out:
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, 64, "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > 64)
		r = 64;
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	long *ptr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	/* the file takes usecs; the kernel stores nsecs */
	*ptr = val * 1000;

	return cnt;
}
static atomic_t tracing_reader;

static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_iterator *iter;

	if (tracing_disabled)
		return -ENODEV;

	/* We only allow for one reader of the pipe */
	if (atomic_inc_return(&tracing_reader) != 1) {
		atomic_dec(&tracing_reader);
		return -EBUSY;
	}

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->tr = &global_trace;

	filp->private_data = iter;

	return 0;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;

	kfree(iter);
	atomic_dec(&tracing_reader);

	return 0;
}
/*
 * Consumer reader.
 */
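/*
 * Reading trace_pipe is destructive: each entry handed to userspace
 * is consumed from the buffer. The reader blocks (with the very
 * primitive poll-every-second wait below) until entries show up,
 * then briefly disables and locks every non-empty per-CPU buffer,
 * formats as much as fits into one page, consumes it, and unlocks
 * again.
 */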
static ssize_t
tracing_read_pipe ( struct file * filp , char __user * ubuf ,
size_t cnt , loff_t * ppos )
{
struct trace_iterator * iter = filp - > private_data ;
struct trace_array_cpu * data ;
static cpumask_t mask ;
struct trace_entry * entry ;
static int start ;
unsigned long flags ;
int read = 0 ;
int cpu ;
int len ;
int ret ;
/* return any leftover data */
if ( iter - > seq . len > start ) {
len = iter - > seq . len - start ;
if ( cnt > len )
cnt = len ;
ret = copy_to_user ( ubuf , iter - > seq . buffer + start , cnt ) ;
if ( ret )
cnt = - EFAULT ;
start + = len ;
return cnt ;
}
trace_seq_reset ( & iter - > seq ) ;
start = 0 ;
while ( trace_empty ( iter ) ) {
/*
* This is a make - shift waitqueue . The reason we don ' t use
* an actual wait queue is because :
* 1 ) we only ever have one waiter
* 2 ) the tracing , traces all functions , we don ' t want
* the overhead of calling wake_up and friends
* ( and tracing them too )
* Anyway , this is really very primitive wakeup .
*/
set_current_state ( TASK_INTERRUPTIBLE ) ;
iter - > tr - > waiter = current ;
/* sleep for one second, and try again. */
schedule_timeout ( HZ ) ;
iter - > tr - > waiter = NULL ;
if ( signal_pending ( current ) )
return - EINTR ;
/*
* We block until we read something and tracing is disabled .
* We still block if tracing is disabled , but we have never
* read anything . This allows a user to cat this file , and
* then enable tracing . But after we have read something ,
* we give an EOF when tracing is again disabled .
*
* iter - > pos will be 0 if we haven ' t read anything .
*/
if ( ! tracer_enabled & & iter - > pos )
break ;
continue ;
}
/* stop when tracing is finished */
if ( trace_empty ( iter ) )
return 0 ;
if ( cnt > = PAGE_SIZE )
cnt = PAGE_SIZE - 1 ;
memset ( iter , 0 , sizeof ( * iter ) ) ;
iter - > tr = & global_trace ;
iter - > pos = - 1 ;
/*
* We need to stop all tracing on all CPUS to read the
* the next buffer . This is a bit expensive , but is
* not done often . We fill all what we can read ,
* and then release the locks again .
*/
cpus_clear ( mask ) ;
local_irq_save ( flags ) ;
for_each_possible_cpu ( cpu ) {
data = iter - > tr - > data [ cpu ] ;
if ( ! head_page ( data ) | | ! data - > trace_idx )
continue ;
atomic_inc ( & data - > disabled ) ;
spin_lock ( & data - > lock ) ;
cpu_set ( cpu , mask ) ;
}
2008-05-12 23:20:46 +04:00
	while ((entry = find_next_entry_inc(iter)) != NULL) {
2008-05-12 23:20:47 +04:00
		ret = print_trace_line(iter);
2008-05-12 23:20:46 +04:00
		if (!ret)
			break;

		trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;
	}

2008-05-12 23:20:46 +04:00
	for_each_cpu_mask(cpu, mask) {
2008-05-12 23:20:46 +04:00
		data = iter->tr->data[cpu];
		spin_unlock(&data->lock);
		atomic_dec(&data->disabled);
	}
	local_irq_restore(flags);

	/* Now copy what we have to the user */
	read = iter->seq.len;
	if (read > cnt)
		read = cnt;

	ret = copy_to_user(ubuf, iter->seq.buffer, read);

	/* if we didn't hand out everything, remember where to resume */
	if (read < iter->seq.len)
		start = read;
	else
		trace_seq_reset(&iter->seq);

	if (ret)
		read = -EFAULT;

	return read;
}
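/*
 * Illustrative only (userspace sketch, hypothetical debugfs mount
 * point): trace_pipe is a consuming reader -- each read() drains
 * entries from the buffer and blocks while it is empty:
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static void drain_trace_pipe(void)
{
	char page[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);

	while ((n = read(fd, page, sizeof(page))) > 0)	/* blocks while empty */
		write(STDOUT_FILENO, page, n);		/* entries are gone once read */

	close(fd);
}
#endif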
2008-05-12 23:20:42 +04:00
static struct file_operations tracing_max_lat_fops = {
2008-05-12 23:20:46 +04:00
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
2008-05-12 23:20:42 +04:00
};

static struct file_operations tracing_ctrl_fops = {
2008-05-12 23:20:46 +04:00
	.open		= tracing_open_generic,
	.read		= tracing_ctrl_read,
	.write		= tracing_ctrl_write,
2008-05-12 23:20:42 +04:00
};

static struct file_operations set_tracer_fops = {
2008-05-12 23:20:46 +04:00
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
2008-05-12 23:20:42 +04:00
};

2008-05-12 23:20:46 +04:00
static struct file_operations tracing_pipe_fops = {
2008-05-12 23:20:46 +04:00
	.open		= tracing_open_pipe,
	.read		= tracing_read_pipe,
	.release	= tracing_release_pipe,
2008-05-12 23:20:46 +04:00
};
2008-05-12 23:20:42 +04:00
#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_long(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", *p);
2008-05-12 23:20:46 +04:00

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2008-05-12 23:20:42 +04:00
}

static struct file_operations tracing_read_long_fops = {
2008-05-12 23:20:46 +04:00
	.open		= tracing_open_generic,
	.read		= tracing_read_long,
2008-05-12 23:20:42 +04:00
};
#endif
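/*
 * Illustrative only (shell, hypothetical debugfs mount point): with
 * CONFIG_DYNAMIC_FTRACE, the counter read by tracing_read_long() is
 * exposed through the file created below in tracer_init_debugfs():
 *
 *   cat /sys/kernel/debug/tracing/dyn_ftrace_total_info
 */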
static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}
2008-05-12 23:20:44 +04:00
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif
2008-05-12 23:20:42 +04:00
static __init void tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
				    &global_trace, &tracing_ctrl_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

	entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
				    NULL, &tracing_iter_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'iter_ctrl' entry\n");

	entry = debugfs_create_file("latency_trace", 0444, d_tracer,
				    &global_trace, &tracing_lt_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'latency_trace' entry\n");

	entry = debugfs_create_file("trace", 0444, d_tracer,
				    &global_trace, &tracing_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace' entry\n");

	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
				    &global_trace, &show_traces_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'available_tracers' entry\n");

	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
				    &global_trace, &set_tracer_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'current_tracer' entry\n");

	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
				    &tracing_max_latency,
				    &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_max_latency' entry\n");

	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
				    &tracing_thresh, &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_thresh' entry\n");

2008-05-12 23:20:45 +04:00
	entry = debugfs_create_file("README", 0644, d_tracer,
				    NULL, &tracing_readme_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'README' entry\n");

2008-05-12 23:20:46 +04:00
	entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
				    NULL, &tracing_pipe_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'trace_pipe' entry\n");

2008-05-12 23:20:42 +04:00
#ifdef CONFIG_DYNAMIC_FTRACE
	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
				    &ftrace_update_tot_cnt,
				    &tracing_read_long_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'dyn_ftrace_total_info' entry\n");
#endif
}
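/*
 * Illustrative only (shell, hypothetical debugfs mount point and
 * tracer name): the files created above combine into a typical
 * tracing session:
 *
 *   cd /sys/kernel/debug/tracing
 *   cat available_tracers		# list registered tracers
 *   echo function > current_tracer	# assumes a "function" tracer is registered
 *   echo 1 > tracing_enabled
 *   cat trace				# one-shot view of the buffer
 *   cat trace_pipe			# blocking, consuming reader
 */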
/* dummy tracer used to disable tracing */
static struct tracer no_tracer __read_mostly =
{
2008-05-12 23:20:46 +04:00
	.name		= "none",
2008-05-12 23:20:42 +04:00
};
2008-05-12 23:20:43 +04:00
static int trace_alloc_page(void)
2008-05-12 23:20:42 +04:00
{
2008-05-12 23:20:43 +04:00
	struct trace_array_cpu *data;
	struct page *page, *tmp;
	LIST_HEAD(pages);
2008-05-12 23:20:45 +04:00
	void *array;
2008-05-12 23:20:43 +04:00
	int i;

	/* first allocate a page for each CPU */
	for_each_possible_cpu(i) {
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_pages;
		}

		page = virt_to_page(array);
		list_add(&page->lru, &pages);

		/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_pages;
		}

		page = virt_to_page(array);
		list_add(&page->lru, &pages);
#endif
	}

	/* Now that we successfully allocated a page per CPU, add them */
	for_each_possible_cpu(i) {
		data = global_trace.data[i];
2008-05-12 23:20:46 +04:00
		spin_lock_init(&data->lock);
2008-05-12 23:20:46 +04:00
		lockdep_set_class(&data->lock, &data->lock_key);
2008-05-12 23:20:43 +04:00
		page = list_entry(pages.next, struct page, lru);
2008-05-12 23:20:45 +04:00
		list_del_init(&page->lru);
2008-05-12 23:20:43 +04:00
		list_add_tail(&page->lru, &data->trace_pages);
		ClearPageLRU(page);

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
2008-05-12 23:20:46 +04:00
		spin_lock_init(&data->lock);
2008-05-12 23:20:46 +04:00
		lockdep_set_class(&data->lock, &data->lock_key);
2008-05-12 23:20:43 +04:00
		page = list_entry(pages.next, struct page, lru);
2008-05-12 23:20:45 +04:00
		list_del_init(&page->lru);
2008-05-12 23:20:43 +04:00
		list_add_tail(&page->lru, &data->trace_pages);
		SetPageLRU(page);
#endif
	}
	global_trace.entries += ENTRIES_PER_PAGE;

	return 0;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, lru) {
2008-05-12 23:20:45 +04:00
		list_del_init(&page->lru);
2008-05-12 23:20:43 +04:00
		__free_page(page);
	}
	return -ENOMEM;
2008-05-12 23:20:42 +04:00
}
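/*
 * Illustrative only: each successful trace_alloc_page() call grows the
 * buffer by one page per possible CPU (two with CONFIG_TRACER_MAX_TRACE)
 * and by ENTRIES_PER_PAGE logical entries. With 4096-byte pages and a
 * hypothetical 64-byte struct trace_entry:
 *
 *   ENTRIES_PER_PAGE = 4096 / 64 = 64
 *
 * so reaching the default trace_nr_entries of 16384 would take
 * 16384 / 64 = 256 calls, i.e. 256 pages per CPU buffer.
 */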
__init static int tracer_alloc_buffers(void)
{
2008-05-12 23:20:43 +04:00
	struct trace_array_cpu *data;
	void *array;
	struct page *page;
	int pages = 0;
2008-05-12 23:20:44 +04:00
	int ret = -ENOMEM;
2008-05-12 23:20:42 +04:00
	int i;

2008-05-12 23:20:43 +04:00
	/* Allocate the first page for all buffers */
2008-05-12 23:20:42 +04:00
	for_each_possible_cpu(i) {
2008-05-12 23:20:43 +04:00
		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
2008-05-12 23:20:42 +04:00
		max_tr.data[i] = &per_cpu(max_data, i);

2008-05-12 23:20:43 +04:00
		array = (void *)__get_free_page(GFP_KERNEL);
2008-05-12 23:20:42 +04:00
		if (array == NULL) {
2008-05-12 23:20:43 +04:00
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
2008-05-12 23:20:42 +04:00
			goto free_buffers;
		}

2008-05-12 23:20:43 +04:00
		/* set the array to the list */
		INIT_LIST_HEAD(&data->trace_pages);
		page = virt_to_page(array);
		list_add(&page->lru, &data->trace_pages);
		/* use the LRU flag to differentiate the two buffers */
		ClearPageLRU(page);

2008-05-12 23:20:42 +04:00
		/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
2008-05-12 23:20:43 +04:00
		array = (void *)__get_free_page(GFP_KERNEL);
2008-05-12 23:20:42 +04:00
		if (array == NULL) {
2008-05-12 23:20:43 +04:00
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
2008-05-12 23:20:42 +04:00
			goto free_buffers;
		}

2008-05-12 23:20:43 +04:00
		INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
		page = virt_to_page(array);
		list_add(&page->lru, &max_tr.data[i]->trace_pages);
		SetPageLRU(page);
2008-05-12 23:20:42 +04:00
#endif
	}
	/*
	 * Since we allocate in whole pages, the entry count may
	 * round up a bit past what was requested.
	 */
2008-05-12 23:20:43 +04:00
	global_trace.entries = ENTRIES_PER_PAGE;
	pages++;

	while (global_trace.entries < trace_nr_entries) {
		if (trace_alloc_page())
			break;
		pages++;
	}
2008-05-12 23:20:44 +04:00
	max_tr.entries = global_trace.entries;
2008-05-12 23:20:42 +04:00

2008-05-12 23:20:43 +04:00
	pr_info("tracer: %d pages allocated for %ld",
		pages, trace_nr_entries);
2008-05-12 23:20:42 +04:00
	pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
	pr_info("   actual entries %ld\n", global_trace.entries);

	tracer_init_debugfs();

	trace_init_cmdlines();

	register_tracer(&no_tracer);
	current_trace = &no_tracer;

2008-05-12 23:20:44 +04:00
	/* All seems OK, enable tracing */
	tracing_disabled = 0;
2008-05-12 23:20:42 +04:00
	return 0;
 free_buffers:
	for (i--; i >= 0; i--) {
2008-05-12 23:20:43 +04:00
		struct page *page, *tmp;
2008-05-12 23:20:42 +04:00
		struct trace_array_cpu *data = global_trace.data[i];

2008-05-12 23:20:45 +04:00
		if (data) {
2008-05-12 23:20:43 +04:00
			list_for_each_entry_safe(page, tmp,
						 &data->trace_pages, lru) {
2008-05-12 23:20:45 +04:00
				list_del_init(&page->lru);
2008-05-12 23:20:43 +04:00
				__free_page(page);
			}
2008-05-12 23:20:42 +04:00
		}

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
2008-05-12 23:20:45 +04:00
		if (data) {
2008-05-12 23:20:43 +04:00
			list_for_each_entry_safe(page, tmp,
						 &data->trace_pages, lru) {
2008-05-12 23:20:45 +04:00
				list_del_init(&page->lru);
2008-05-12 23:20:43 +04:00
				__free_page(page);
			}
2008-05-12 23:20:42 +04:00
		}
#endif
	}
2008-05-12 23:20:44 +04:00
	return ret;
2008-05-12 23:20:42 +04:00
}
2008-05-12 23:20:44 +04:00
fs_initcall(tracer_alloc_buffers);
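/*
 * Note (hedged): fs_initcall() runs tracer_alloc_buffers() late enough
 * in boot that debugfs itself is already registered, so the
 * tracer_init_debugfs() call above can create its files safely.
 */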