2008-12-24 07:24:12 +03:00
/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */
# include <linux/module.h>
# include <linux/mutex.h>
# include <linux/ftrace.h>
# include "trace_output.h"
/* must be a power of 2 */
# define EVENT_HASHSIZE 128
2013-03-11 11:14:03 +04:00
DECLARE_RWSEM ( trace_event_sem ) ;
2009-05-26 22:25:22 +04:00
2008-12-24 07:24:12 +03:00
static struct hlist_head event_hash [ EVENT_HASHSIZE ] __read_mostly ;
static int next_event_type = __TRACE_LAST_TYPE + 1 ;
2013-03-09 06:02:34 +04:00
enum print_line_t trace_print_bputs_msg_only ( struct trace_iterator * iter )
{
struct trace_seq * s = & iter - > seq ;
struct trace_entry * entry = iter - > ent ;
struct bputs_entry * field ;
trace_assign_type ( field , entry ) ;
2014-11-12 18:29:54 +03:00
trace_seq_puts ( s , field - > str ) ;
2013-03-09 06:02:34 +04:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( s ) ;
2013-03-09 06:02:34 +04:00
}
2009-03-19 19:20:38 +03:00
enum print_line_t trace_print_bprintk_msg_only ( struct trace_iterator * iter )
{
struct trace_seq * s = & iter - > seq ;
struct trace_entry * entry = iter - > ent ;
struct bprint_entry * field ;
trace_assign_type ( field , entry ) ;
2014-11-12 18:29:54 +03:00
trace_seq_bprintf ( s , field - > fmt , field - > buf ) ;
2009-03-19 19:20:38 +03:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( s ) ;
2009-03-19 19:20:38 +03:00
}
enum print_line_t trace_print_printk_msg_only ( struct trace_iterator * iter )
{
struct trace_seq * s = & iter - > seq ;
struct trace_entry * entry = iter - > ent ;
struct print_entry * field ;
trace_assign_type ( field , entry ) ;
2014-11-12 18:29:54 +03:00
trace_seq_puts ( s , field - > buf ) ;
2009-03-19 19:20:38 +03:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( s ) ;
2009-03-19 19:20:38 +03:00
}
2009-05-26 22:25:22 +04:00
const char *
2015-05-05 01:12:44 +03:00
trace_print_flags_seq ( struct trace_seq * p , const char * delim ,
unsigned long flags ,
const struct trace_print_flags * flag_array )
2009-05-26 22:25:22 +04:00
{
unsigned long mask ;
const char * str ;
2014-06-26 17:42:41 +04:00
const char * ret = trace_seq_buffer_ptr ( p ) ;
2012-02-19 15:16:07 +04:00
int i , first = 1 ;
2009-05-26 22:25:22 +04:00
for ( i = 0 ; flag_array [ i ] . name & & flags ; i + + ) {
mask = flag_array [ i ] . mask ;
if ( ( flags & mask ) ! = mask )
continue ;
str = flag_array [ i ] . name ;
flags & = ~ mask ;
2012-02-19 15:16:07 +04:00
if ( ! first & & delim )
2009-05-26 22:25:22 +04:00
trace_seq_puts ( p , delim ) ;
2012-02-19 15:16:07 +04:00
else
first = 0 ;
2009-05-26 22:25:22 +04:00
trace_seq_puts ( p , str ) ;
}
/* check for left over flags */
if ( flags ) {
2012-02-21 05:37:32 +04:00
if ( ! first & & delim )
2009-05-26 22:25:22 +04:00
trace_seq_puts ( p , delim ) ;
trace_seq_printf ( p , " 0x%lx " , flags ) ;
}
trace_seq_putc ( p , 0 ) ;
2009-06-03 17:52:03 +04:00
return ret ;
2009-05-26 22:25:22 +04:00
}
2015-05-05 01:12:44 +03:00
EXPORT_SYMBOL ( trace_print_flags_seq ) ;
2009-05-26 22:25:22 +04:00
2009-05-21 03:21:47 +04:00
const char *
2015-05-05 01:12:44 +03:00
trace_print_symbols_seq ( struct trace_seq * p , unsigned long val ,
const struct trace_print_flags * symbol_array )
2009-05-21 03:21:47 +04:00
{
int i ;
2014-06-26 17:42:41 +04:00
const char * ret = trace_seq_buffer_ptr ( p ) ;
2009-05-21 03:21:47 +04:00
for ( i = 0 ; symbol_array [ i ] . name ; i + + ) {
if ( val ! = symbol_array [ i ] . mask )
continue ;
trace_seq_puts ( p , symbol_array [ i ] . name ) ;
break ;
}
2014-06-26 17:42:41 +04:00
if ( ret = = ( const char * ) ( trace_seq_buffer_ptr ( p ) ) )
2009-05-21 03:21:47 +04:00
trace_seq_printf ( p , " 0x%lx " , val ) ;
2014-11-24 03:34:19 +03:00
2009-05-21 03:21:47 +04:00
trace_seq_putc ( p , 0 ) ;
2009-06-03 17:52:03 +04:00
return ret ;
2009-05-21 03:21:47 +04:00
}
2015-05-05 01:12:44 +03:00
EXPORT_SYMBOL ( trace_print_symbols_seq ) ;
2009-05-21 03:21:47 +04:00
2011-04-19 05:35:28 +04:00
#if BITS_PER_LONG == 32
/*
 * 64-bit variant of trace_print_symbols_seq(), only needed when
 * unsigned long cannot hold a u64 mask value.
 */
const char *
trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			    const struct trace_print_flags_u64 *symbol_array)
{
	const char *start = trace_seq_buffer_ptr(p);
	int idx;

	for (idx = 0; symbol_array[idx].name; idx++) {
		if (val == symbol_array[idx].mask) {
			trace_seq_puts(p, symbol_array[idx].name);
			break;
		}
	}

	/* Nothing was written above, so no symbol matched. */
	if (start == (const char *)trace_seq_buffer_ptr(p))
		trace_seq_printf(p, "0x%llx", val);

	trace_seq_putc(p, 0);

	return start;
}
EXPORT_SYMBOL(trace_print_symbols_seq_u64);
#endif
tracing: Add __bitmask() macro to trace events to cpumasks and other bitmasks
Being able to show a cpumask of events can be useful as some events
may affect only some CPUs. There is no standard way to record the
cpumask and converting it to a string is rather expensive during
the trace as traces happen in hotpaths. It would be better to record
the raw event mask and be able to parse it at print time.
The following macros were added for use with the TRACE_EVENT() macro:
__bitmask()
__assign_bitmask()
__get_bitmask()
To test this, I added this to the sched_migrate_task event, which
looked like this:
TRACE_EVENT(sched_migrate_task,
TP_PROTO(struct task_struct *p, int dest_cpu, const struct cpumask *cpus),
TP_ARGS(p, dest_cpu, cpus),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
__field( int, orig_cpu )
__field( int, dest_cpu )
__bitmask( cpumask, num_possible_cpus() )
),
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->orig_cpu = task_cpu(p);
__entry->dest_cpu = dest_cpu;
__assign_bitmask(cpumask, cpumask_bits(cpus), num_possible_cpus());
),
TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d cpumask=%s",
__entry->comm, __entry->pid, __entry->prio,
__entry->orig_cpu, __entry->dest_cpu,
__get_bitmask(cpumask))
);
With the output of:
ksmtuned-3613 [003] d..2 485.220508: sched_migrate_task: comm=ksmtuned pid=3615 prio=120 orig_cpu=3 dest_cpu=2 cpumask=00000000,0000000f
migration/1-13 [001] d..5 485.221202: sched_migrate_task: comm=ksmtuned pid=3614 prio=120 orig_cpu=1 dest_cpu=0 cpumask=00000000,0000000f
awk-3615 [002] d.H5 485.221747: sched_migrate_task: comm=rcu_preempt pid=7 prio=120 orig_cpu=0 dest_cpu=1 cpumask=00000000,000000ff
migration/2-18 [002] d..5 485.222062: sched_migrate_task: comm=ksmtuned pid=3615 prio=120 orig_cpu=2 dest_cpu=3 cpumask=00000000,0000000f
Link: http://lkml.kernel.org/r/1399377998-14870-6-git-send-email-javi.merino@arm.com
Link: http://lkml.kernel.org/r/20140506132238.22e136d1@gandalf.local.home
Suggested-by: Javi Merino <javi.merino@arm.com>
Tested-by: Javi Merino <javi.merino@arm.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2014-05-06 21:10:24 +04:00
/*
 * Render a recorded bitmask (e.g. a cpumask captured via __bitmask())
 * into @p as comma-separated hex words.  @bitmask_size is in bytes.
 * Returns a pointer to the start of the NUL-terminated string.
 */
const char *
trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
			unsigned int bitmask_size)
{
	const char *start = trace_seq_buffer_ptr(p);

	/* trace_seq_bitmask() takes the size in bits. */
	trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
	trace_seq_putc(p, 0);

	return start;
}
EXPORT_SYMBOL_GPL(trace_print_bitmask_seq);
tracing: Add __bitmask() macro to trace events to cpumasks and other bitmasks
Being able to show a cpumask of events can be useful as some events
may affect only some CPUs. There is no standard way to record the
cpumask and converting it to a string is rather expensive during
the trace as traces happen in hotpaths. It would be better to record
the raw event mask and be able to parse it at print time.
The following macros were added for use with the TRACE_EVENT() macro:
__bitmask()
__assign_bitmask()
__get_bitmask()
To test this, I added this to the sched_migrate_task event, which
looked like this:
TRACE_EVENT(sched_migrate_task,
TP_PROTO(struct task_struct *p, int dest_cpu, const struct cpumask *cpus),
TP_ARGS(p, dest_cpu, cpus),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
__field( int, orig_cpu )
__field( int, dest_cpu )
__bitmask( cpumask, num_possible_cpus() )
),
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->orig_cpu = task_cpu(p);
__entry->dest_cpu = dest_cpu;
__assign_bitmask(cpumask, cpumask_bits(cpus), num_possible_cpus());
),
TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d cpumask=%s",
__entry->comm, __entry->pid, __entry->prio,
__entry->orig_cpu, __entry->dest_cpu,
__get_bitmask(cpumask))
);
With the output of:
ksmtuned-3613 [003] d..2 485.220508: sched_migrate_task: comm=ksmtuned pid=3615 prio=120 orig_cpu=3 dest_cpu=2 cpumask=00000000,0000000f
migration/1-13 [001] d..5 485.221202: sched_migrate_task: comm=ksmtuned pid=3614 prio=120 orig_cpu=1 dest_cpu=0 cpumask=00000000,0000000f
awk-3615 [002] d.H5 485.221747: sched_migrate_task: comm=rcu_preempt pid=7 prio=120 orig_cpu=0 dest_cpu=1 cpumask=00000000,000000ff
migration/2-18 [002] d..5 485.222062: sched_migrate_task: comm=ksmtuned pid=3615 prio=120 orig_cpu=2 dest_cpu=3 cpumask=00000000,0000000f
Link: http://lkml.kernel.org/r/1399377998-14870-6-git-send-email-javi.merino@arm.com
Link: http://lkml.kernel.org/r/20140506132238.22e136d1@gandalf.local.home
Suggested-by: Javi Merino <javi.merino@arm.com>
Tested-by: Javi Merino <javi.merino@arm.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2014-05-06 21:10:24 +04:00
2010-04-01 15:40:58 +04:00
/*
 * Dump @buf_len bytes of @buf into @p as space-separated two-digit hex.
 * Returns a pointer to the start of the NUL-terminated string.
 */
const char *
trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
{
	const char *start = trace_seq_buffer_ptr(p);
	int idx;

	for (idx = 0; idx < buf_len; idx++)
		trace_seq_printf(p, "%s%2.2x", idx == 0 ? "" : " ", buf[idx]);

	trace_seq_putc(p, 0);

	return start;
}
EXPORT_SYMBOL(trace_print_hex_seq);
2010-04-01 15:40:58 +04:00
2015-01-28 15:48:53 +03:00
const char *
2015-05-05 01:12:44 +03:00
trace_print_array_seq ( struct trace_seq * p , const void * buf , int count ,
size_t el_size )
2015-01-28 15:48:53 +03:00
{
const char * ret = trace_seq_buffer_ptr ( p ) ;
const char * prefix = " " ;
void * ptr = ( void * ) buf ;
2015-04-29 18:18:46 +03:00
size_t buf_len = count * el_size ;
2015-01-28 15:48:53 +03:00
trace_seq_putc ( p , ' { ' ) ;
while ( ptr < buf + buf_len ) {
switch ( el_size ) {
case 1 :
trace_seq_printf ( p , " %s0x%x " , prefix ,
* ( u8 * ) ptr ) ;
break ;
case 2 :
trace_seq_printf ( p , " %s0x%x " , prefix ,
* ( u16 * ) ptr ) ;
break ;
case 4 :
trace_seq_printf ( p , " %s0x%x " , prefix ,
* ( u32 * ) ptr ) ;
break ;
case 8 :
trace_seq_printf ( p , " %s0x%llx " , prefix ,
* ( u64 * ) ptr ) ;
break ;
default :
trace_seq_printf ( p , " BAD SIZE:%zu 0x%x " , el_size ,
* ( u8 * ) ptr ) ;
el_size = 1 ;
}
prefix = " , " ;
ptr + = el_size ;
}
trace_seq_putc ( p , ' } ' ) ;
trace_seq_putc ( p , 0 ) ;
return ret ;
}
2015-05-05 01:12:44 +03:00
EXPORT_SYMBOL ( trace_print_array_seq ) ;
2015-01-28 15:48:53 +03:00
2015-05-05 21:18:11 +03:00
int trace_raw_output_prep ( struct trace_iterator * iter ,
struct trace_event * trace_event )
2013-02-21 06:32:38 +04:00
{
2015-05-05 18:45:27 +03:00
struct trace_event_call * event ;
2013-02-21 06:32:38 +04:00
struct trace_seq * s = & iter - > seq ;
struct trace_seq * p = & iter - > tmp_seq ;
struct trace_entry * entry ;
2015-05-05 18:45:27 +03:00
event = container_of ( trace_event , struct trace_event_call , event ) ;
2013-02-21 06:32:38 +04:00
entry = iter - > ent ;
if ( entry - > type ! = event - > event . type ) {
WARN_ON_ONCE ( 1 ) ;
return TRACE_TYPE_UNHANDLED ;
}
trace_seq_init ( p ) ;
2014-11-12 18:29:54 +03:00
trace_seq_printf ( s , " %s: " , ftrace_event_name ( event ) ) ;
2014-11-14 19:42:06 +03:00
return trace_handle_return ( s ) ;
2013-02-21 06:32:38 +04:00
}
2015-05-05 21:18:11 +03:00
EXPORT_SYMBOL ( trace_raw_output_prep ) ;
2013-02-21 06:32:38 +04:00
2015-05-05 21:18:11 +03:00
static int trace_output_raw ( struct trace_iterator * iter , char * name ,
char * fmt , va_list ap )
2012-08-10 03:16:14 +04:00
{
struct trace_seq * s = & iter - > seq ;
2014-11-12 18:29:54 +03:00
trace_seq_printf ( s , " %s: " , name ) ;
trace_seq_vprintf ( s , fmt , ap ) ;
2012-08-10 03:16:14 +04:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( s ) ;
2012-08-10 03:16:14 +04:00
}
2015-05-05 21:18:11 +03:00
/* Varargs front end for trace_output_raw(). */
int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
{
	int ret;
	va_list args;

	va_start(args, fmt);
	ret = trace_output_raw(iter, name, fmt, args);
	va_end(args);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_output_call);
2012-08-10 03:16:14 +04:00
2008-12-24 07:24:12 +03:00
#ifdef CONFIG_KRETPROBES
/*
 * kretprobes overwrite the return address with their trampoline, so a
 * symbol lookup lands on "kretprobe_trampoline".  Substitute a marker
 * that makes the substitution obvious in the trace.
 */
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";

	/* sizeof includes the NUL, so this is an exact-match compare. */
	return strncmp(tramp_name, name, sizeof(tramp_name)) == 0 ?
		"[unknown/kretprobe'd]" : name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */
2014-11-12 18:29:54 +03:00
/* Print the symbol name for @address (no offset) via @fmt. */
static void
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	trace_seq_printf(s, fmt, kretprobed(str));
#endif
}
2014-11-12 18:29:54 +03:00
/* Print the symbol name plus offset/size for @address via @fmt. */
static void
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, address);

	trace_seq_printf(s, fmt, kretprobed(str));
#endif
}
# ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
# else
# define IP_FMT "%016lx"
# endif
int seq_print_user_ip ( struct trace_seq * s , struct mm_struct * mm ,
unsigned long ip , unsigned long sym_flags )
{
struct file * file = NULL ;
unsigned long vmstart = 0 ;
int ret = 1 ;
2009-11-25 18:10:14 +03:00
if ( s - > full )
return 0 ;
2008-12-24 07:24:12 +03:00
if ( mm ) {
const struct vm_area_struct * vma ;
down_read ( & mm - > mmap_sem ) ;
vma = find_vma ( mm , ip ) ;
if ( vma ) {
file = vma - > vm_file ;
vmstart = vma - > vm_start ;
}
if ( file ) {
ret = trace_seq_path ( s , & file - > f_path ) ;
if ( ret )
2014-11-12 18:29:54 +03:00
trace_seq_printf ( s , " [+0x%lx] " ,
ip - vmstart ) ;
2008-12-24 07:24:12 +03:00
}
up_read ( & mm - > mmap_sem ) ;
}
if ( ret & & ( ( sym_flags & TRACE_ITER_SYM_ADDR ) | | ! file ) )
2014-11-12 18:29:54 +03:00
trace_seq_printf ( s , " < " IP_FMT " > " , ip ) ;
return ! trace_seq_has_overflowed ( s ) ;
2008-12-24 07:24:12 +03:00
}
int
seq_print_userip_objs ( const struct userstack_entry * entry , struct trace_seq * s ,
unsigned long sym_flags )
{
struct mm_struct * mm = NULL ;
unsigned int i ;
if ( trace_flags & TRACE_ITER_SYM_USEROBJ ) {
struct task_struct * task ;
/*
* we do the lookup on the thread group leader ,
* since individual threads might have already quit !
*/
rcu_read_lock ( ) ;
2009-09-11 19:36:23 +04:00
task = find_task_by_vpid ( entry - > tgid ) ;
2008-12-24 07:24:12 +03:00
if ( task )
mm = get_task_mm ( task ) ;
rcu_read_unlock ( ) ;
}
for ( i = 0 ; i < FTRACE_STACK_ENTRIES ; i + + ) {
unsigned long ip = entry - > caller [ i ] ;
2014-11-12 18:29:54 +03:00
if ( ip = = ULONG_MAX | | trace_seq_has_overflowed ( s ) )
2008-12-24 07:24:12 +03:00
break ;
2014-11-12 18:29:54 +03:00
trace_seq_puts ( s , " => " ) ;
2008-12-24 07:24:12 +03:00
if ( ! ip ) {
2014-11-12 18:29:54 +03:00
trace_seq_puts ( s , " ?? " ) ;
trace_seq_putc ( s , ' \n ' ) ;
2008-12-24 07:24:12 +03:00
continue ;
}
2014-11-12 18:29:54 +03:00
seq_print_user_ip ( s , mm , ip , sym_flags ) ;
trace_seq_putc ( s , ' \n ' ) ;
2008-12-24 07:24:12 +03:00
}
if ( mm )
mmput ( mm ) ;
2014-11-12 18:29:54 +03:00
return ! trace_seq_has_overflowed ( s ) ;
2008-12-24 07:24:12 +03:00
}
int
seq_print_ip_sym ( struct trace_seq * s , unsigned long ip , unsigned long sym_flags )
{
2014-11-12 18:29:54 +03:00
if ( ! ip ) {
trace_seq_putc ( s , ' 0 ' ) ;
goto out ;
}
2008-12-24 07:24:12 +03:00
if ( sym_flags & TRACE_ITER_SYM_OFFSET )
2014-11-12 18:29:54 +03:00
seq_print_sym_offset ( s , " %s " , ip ) ;
2008-12-24 07:24:12 +03:00
else
2014-11-12 18:29:54 +03:00
seq_print_sym_short ( s , " %s " , ip ) ;
2008-12-24 07:24:12 +03:00
if ( sym_flags & TRACE_ITER_SYM_ADDR )
2014-11-12 18:29:54 +03:00
trace_seq_printf ( s , " < " IP_FMT " > " , ip ) ;
out :
return ! trace_seq_has_overflowed ( s ) ;
2008-12-24 07:24:12 +03:00
}
2009-09-11 22:24:13 +04:00
/**
* trace_print_lat_fmt - print the irq , preempt and lockdep fields
* @ s : trace seq struct to write to
* @ entry : The trace entry field from the ring buffer
*
* Prints the generic fields of irqs off , in hard or softirq , preempt
2011-03-09 18:41:56 +03:00
* count .
2009-09-11 22:24:13 +04:00
*/
int trace_print_lat_fmt ( struct trace_seq * s , struct trace_entry * entry )
2009-02-03 01:29:21 +03:00
{
2010-12-04 03:13:26 +03:00
char hardsoft_irq ;
char need_resched ;
char irqs_off ;
int hardirq ;
int softirq ;
2009-02-03 01:29:21 +03:00
hardirq = entry - > flags & TRACE_FLAG_HARDIRQ ;
softirq = entry - > flags & TRACE_FLAG_SOFTIRQ ;
2009-02-04 01:20:41 +03:00
2010-12-04 03:13:26 +03:00
irqs_off =
( entry - > flags & TRACE_FLAG_IRQS_OFF ) ? ' d ' :
( entry - > flags & TRACE_FLAG_IRQS_NOSUPPORT ) ? ' X ' :
' . ' ;
2013-10-04 19:28:26 +04:00
switch ( entry - > flags & ( TRACE_FLAG_NEED_RESCHED |
TRACE_FLAG_PREEMPT_RESCHED ) ) {
case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED :
need_resched = ' N ' ;
break ;
case TRACE_FLAG_NEED_RESCHED :
need_resched = ' n ' ;
break ;
case TRACE_FLAG_PREEMPT_RESCHED :
need_resched = ' p ' ;
break ;
default :
need_resched = ' . ' ;
break ;
}
2010-12-04 03:13:26 +03:00
hardsoft_irq =
( hardirq & & softirq ) ? ' H ' :
hardirq ? ' h ' :
softirq ? ' s ' :
' . ' ;
2014-11-12 18:29:54 +03:00
trace_seq_printf ( s , " %c%c%c " ,
irqs_off , need_resched , hardsoft_irq ) ;
2009-02-03 01:29:21 +03:00
2009-09-27 15:02:07 +04:00
if ( entry - > preempt_count )
2014-11-12 18:29:54 +03:00
trace_seq_printf ( s , " %x " , entry - > preempt_count ) ;
2009-09-11 21:55:35 +04:00
else
2014-11-12 18:29:54 +03:00
trace_seq_putc ( s , ' . ' ) ;
2009-09-27 15:02:07 +04:00
2014-11-12 18:29:54 +03:00
return ! trace_seq_has_overflowed ( s ) ;
2009-02-03 01:29:21 +03:00
}
2009-09-11 22:24:13 +04:00
static int
lat_print_generic ( struct trace_seq * s , struct trace_entry * entry , int cpu )
{
char comm [ TASK_COMM_LEN ] ;
trace_find_cmdline ( entry - > pid , comm ) ;
2014-11-12 18:29:54 +03:00
trace_seq_printf ( s , " %8.8s-%-5d %3d " ,
comm , entry - > pid , cpu ) ;
2009-09-11 22:24:13 +04:00
return trace_print_lat_fmt ( s , entry ) ;
}
2014-11-24 03:34:19 +03:00
/* Trace overhead markers, ordered from largest threshold down. */
static const struct trace_mark {
	unsigned long long	val; /* unit: nsec */
	char			sym;
} mark[] = {
	{ .val = 1000000000ULL, .sym = '$' },	/* 1 sec */
	{ .val = 1000000ULL,    .sym = '#' },	/* 1000 usecs */
	{ .val = 100000ULL,     .sym = '!' },	/* 100 usecs */
	{ .val = 10000ULL,      .sym = '+' },	/* 10 usecs */
};

/* Return the overhead marker for a delta of @d nsecs (' ' if below all). */
char trace_find_mark(unsigned long long d)
{
	int i;
	int size = sizeof(mark) / sizeof(mark[0]);

	for (i = 0; i < size; i++) {
		if (d >= mark[i].val)
			break;
	}

	return (i == size) ? ' ' : mark[i].sym;
}
2009-02-03 01:29:21 +03:00
2009-02-04 01:20:41 +03:00
static int
2012-11-14 00:18:22 +04:00
lat_print_timestamp ( struct trace_iterator * iter , u64 next_ts )
2009-02-03 01:29:21 +03:00
{
2012-11-14 00:18:22 +04:00
unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE ;
unsigned long in_ns = iter - > iter_flags & TRACE_FILE_TIME_IN_NS ;
tracing: Consolidate max_tr into main trace_array structure
Currently, the way the latency tracers and snapshot feature works
is to have a separate trace_array called "max_tr" that holds the
snapshot buffer. For latency tracers, this snapshot buffer is used
to swap the running buffer with this buffer to save the current max
latency.
The only items needed for the max_tr is really just a copy of the buffer
itself, the per_cpu data pointers, the time_start timestamp that states
when the max latency was triggered, and the cpu that the max latency
was triggered on. All other fields in trace_array are unused by the
max_tr, making the max_tr mostly bloat.
This change removes the max_tr completely, and adds a new structure
called trace_buffer, that holds the buffer pointer, the per_cpu data
pointers, the time_start timestamp, and the cpu where the latency occurred.
The trace_array, now has two trace_buffers, one for the normal trace and
one for the max trace or snapshot. By doing this, not only do we remove
the bloat from the max_trace but the instances of traces can now use
their own snapshot feature and not have just the top level global_trace have
the snapshot feature and latency tracers for itself.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-03-05 18:24:35 +04:00
unsigned long long abs_ts = iter - > ts - iter - > trace_buffer - > time_start ;
2012-11-14 00:18:22 +04:00
unsigned long long rel_ts = next_ts - iter - > ts ;
struct trace_seq * s = & iter - > seq ;
if ( in_ns ) {
abs_ts = ns2usecs ( abs_ts ) ;
rel_ts = ns2usecs ( rel_ts ) ;
}
if ( verbose & & in_ns ) {
unsigned long abs_usec = do_div ( abs_ts , USEC_PER_MSEC ) ;
unsigned long abs_msec = ( unsigned long ) abs_ts ;
unsigned long rel_usec = do_div ( rel_ts , USEC_PER_MSEC ) ;
unsigned long rel_msec = ( unsigned long ) rel_ts ;
2014-11-12 18:29:54 +03:00
trace_seq_printf (
s , " [%08llx] %ld.%03ldms (+%ld.%03ldms): " ,
ns2usecs ( iter - > ts ) ,
abs_msec , abs_usec ,
rel_msec , rel_usec ) ;
2012-11-14 00:18:22 +04:00
} else if ( verbose & & ! in_ns ) {
2014-11-12 18:29:54 +03:00
trace_seq_printf (
s , " [%016llx] %lld (+%lld): " ,
iter - > ts , abs_ts , rel_ts ) ;
2012-11-14 00:18:22 +04:00
} else if ( ! verbose & & in_ns ) {
2014-11-12 18:29:54 +03:00
trace_seq_printf (
s , " %4lldus%c: " ,
abs_ts ,
2014-11-24 03:34:19 +03:00
trace_find_mark ( rel_ts * NSEC_PER_USEC ) ) ;
2014-11-12 18:29:54 +03:00
2012-11-14 00:18:22 +04:00
} else { /* !verbose && !in_ns */
2014-11-12 18:29:54 +03:00
trace_seq_printf ( s , " %4lld: " , abs_ts ) ;
2012-11-14 00:18:22 +04:00
}
2014-11-12 18:29:54 +03:00
return ! trace_seq_has_overflowed ( s ) ;
2009-02-03 01:29:21 +03:00
}
int trace_print_context ( struct trace_iterator * iter )
{
struct trace_seq * s = & iter - > seq ;
struct trace_entry * entry = iter - > ent ;
2012-11-14 00:18:22 +04:00
unsigned long long t ;
unsigned long secs , usec_rem ;
2009-03-17 02:20:15 +03:00
char comm [ TASK_COMM_LEN ] ;
trace_find_cmdline ( entry - > pid , comm ) ;
2009-02-03 01:29:21 +03:00
2014-11-12 18:29:54 +03:00
trace_seq_printf ( s , " %16s-%-5d [%03d] " ,
tracing: Add irq, preempt-count and need resched info to default trace output
People keep asking how to get the preempt count, irq, and need resched info
and we keep telling them to enable the latency format. Some developers think
that traces without this info is completely useless, and for a lot of tasks
it is useless.
The first option was to enable the latency trace as the default format, but
the header for the latency format is pretty useless for most tracers and
it also does the timestamp in straight microseconds from the time the trace
started. This is sometimes more difficult to read as the default trace is
seconds from the start of boot up.
Latency format:
# tracer: nop
#
# nop latency trace v1.1.5 on 3.2.0-rc1-test+
# --------------------------------------------------------------------
# latency: 0 us, #159771/64234230, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
# -----------------
# | task: -0 (uid:0 nice:0 policy:0 rt_prio:0)
# -----------------
#
# _------=> CPU#
# / _-----=> irqs-off
# | / _----=> need-resched
# || / _---=> hardirq/softirq
# ||| / _--=> preempt-depth
# |||| / delay
# cmd pid ||||| time | caller
# \ / ||||| \ | /
migratio-6 0...2 41778231us+: rcu_note_context_switch <-__schedule
migratio-6 0...2 41778233us : trace_rcu_utilization <-rcu_note_context_switch
migratio-6 0...2 41778235us+: rcu_sched_qs <-rcu_note_context_switch
migratio-6 0d..2 41778236us+: rcu_preempt_qs <-rcu_note_context_switch
migratio-6 0...2 41778238us : trace_rcu_utilization <-rcu_note_context_switch
migratio-6 0...2 41778239us+: debug_lockdep_rcu_enabled <-__schedule
default format:
# tracer: nop
#
# TASK-PID CPU# TIMESTAMP FUNCTION
# | | | | |
migration/0-6 [000] 50.025810: rcu_note_context_switch <-__schedule
migration/0-6 [000] 50.025812: trace_rcu_utilization <-rcu_note_context_switch
migration/0-6 [000] 50.025813: rcu_sched_qs <-rcu_note_context_switch
migration/0-6 [000] 50.025815: rcu_preempt_qs <-rcu_note_context_switch
migration/0-6 [000] 50.025817: trace_rcu_utilization <-rcu_note_context_switch
migration/0-6 [000] 50.025818: debug_lockdep_rcu_enabled <-__schedule
migration/0-6 [000] 50.025820: debug_lockdep_rcu_enabled <-__schedule
The latency format header has latency information that is pretty meaningless
for most tracers. Although some of the header is useful, and we can add that
later to the default format as well.
What is really useful with the latency format is the irqs-off, need-resched
hard/softirq context and the preempt count.
This commit adds the option irq-info which is on by default that adds this
information:
# tracer: nop
#
# _-----=> irqs-off
# / _----=> need-resched
# | / _---=> hardirq/softirq
# || / _--=> preempt-depth
# ||| / delay
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
<idle>-0 [000] d..2 49.309305: cpuidle_get_driver <-cpuidle_idle_call
<idle>-0 [000] d..2 49.309307: mwait_idle <-cpu_idle
<idle>-0 [000] d..2 49.309309: need_resched <-mwait_idle
<idle>-0 [000] d..2 49.309310: test_ti_thread_flag <-need_resched
<idle>-0 [000] d..2 49.309312: trace_power_start.constprop.13 <-mwait_idle
<idle>-0 [000] d..2 49.309313: trace_cpu_idle <-mwait_idle
<idle>-0 [000] d..2 49.309315: need_resched <-mwait_idle
If a user wants the old format, they can disable the 'irq-info' option:
# tracer: nop
#
# TASK-PID CPU# TIMESTAMP FUNCTION
# | | | | |
<idle>-0 [000] 49.309305: cpuidle_get_driver <-cpuidle_idle_call
<idle>-0 [000] 49.309307: mwait_idle <-cpu_idle
<idle>-0 [000] 49.309309: need_resched <-mwait_idle
<idle>-0 [000] 49.309310: test_ti_thread_flag <-need_resched
<idle>-0 [000] 49.309312: trace_power_start.constprop.13 <-mwait_idle
<idle>-0 [000] 49.309313: trace_cpu_idle <-mwait_idle
<idle>-0 [000] 49.309315: need_resched <-mwait_idle
Requested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2011-11-17 18:34:33 +04:00
comm , entry - > pid , iter - > cpu ) ;
2014-11-12 18:29:54 +03:00
if ( trace_flags & TRACE_ITER_IRQ_INFO )
trace_print_lat_fmt ( s , entry ) ;
tracing: Add irq, preempt-count and need resched info to default trace output
People keep asking how to get the preempt count, irq, and need resched info
and we keep telling them to enable the latency format. Some developers think
that traces without this info is completely useless, and for a lot of tasks
it is useless.
The first option was to enable the latency trace as the default format, but
the header for the latency format is pretty useless for most tracers and
it also does the timestamp in straight microseconds from the time the trace
started. This is sometimes more difficult to read as the default trace is
seconds from the start of boot up.
Latency format:
# tracer: nop
#
# nop latency trace v1.1.5 on 3.2.0-rc1-test+
# --------------------------------------------------------------------
# latency: 0 us, #159771/64234230, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
# -----------------
# | task: -0 (uid:0 nice:0 policy:0 rt_prio:0)
# -----------------
#
# _------=> CPU#
# / _-----=> irqs-off
# | / _----=> need-resched
# || / _---=> hardirq/softirq
# ||| / _--=> preempt-depth
# |||| / delay
# cmd pid ||||| time | caller
# \ / ||||| \ | /
migratio-6 0...2 41778231us+: rcu_note_context_switch <-__schedule
migratio-6 0...2 41778233us : trace_rcu_utilization <-rcu_note_context_switch
migratio-6 0...2 41778235us+: rcu_sched_qs <-rcu_note_context_switch
migratio-6 0d..2 41778236us+: rcu_preempt_qs <-rcu_note_context_switch
migratio-6 0...2 41778238us : trace_rcu_utilization <-rcu_note_context_switch
migratio-6 0...2 41778239us+: debug_lockdep_rcu_enabled <-__schedule
default format:
# tracer: nop
#
# TASK-PID CPU# TIMESTAMP FUNCTION
# | | | | |
migration/0-6 [000] 50.025810: rcu_note_context_switch <-__schedule
migration/0-6 [000] 50.025812: trace_rcu_utilization <-rcu_note_context_switch
migration/0-6 [000] 50.025813: rcu_sched_qs <-rcu_note_context_switch
migration/0-6 [000] 50.025815: rcu_preempt_qs <-rcu_note_context_switch
migration/0-6 [000] 50.025817: trace_rcu_utilization <-rcu_note_context_switch
migration/0-6 [000] 50.025818: debug_lockdep_rcu_enabled <-__schedule
migration/0-6 [000] 50.025820: debug_lockdep_rcu_enabled <-__schedule
The latency format header has latency information that is pretty meaningless
for most tracers. Although some of the header is useful, and we can add that
later to the default format as well.
What is really useful with the latency format is the irqs-off, need-resched
hard/softirq context and the preempt count.
This commit adds the option irq-info which is on by default that adds this
information:
# tracer: nop
#
# _-----=> irqs-off
# / _----=> need-resched
# | / _---=> hardirq/softirq
# || / _--=> preempt-depth
# ||| / delay
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
<idle>-0 [000] d..2 49.309305: cpuidle_get_driver <-cpuidle_idle_call
<idle>-0 [000] d..2 49.309307: mwait_idle <-cpu_idle
<idle>-0 [000] d..2 49.309309: need_resched <-mwait_idle
<idle>-0 [000] d..2 49.309310: test_ti_thread_flag <-need_resched
<idle>-0 [000] d..2 49.309312: trace_power_start.constprop.13 <-mwait_idle
<idle>-0 [000] d..2 49.309313: trace_cpu_idle <-mwait_idle
<idle>-0 [000] d..2 49.309315: need_resched <-mwait_idle
If a user wants the old format, they can disable the 'irq-info' option:
# tracer: nop
#
# TASK-PID CPU# TIMESTAMP FUNCTION
# | | | | |
<idle>-0 [000] 49.309305: cpuidle_get_driver <-cpuidle_idle_call
<idle>-0 [000] 49.309307: mwait_idle <-cpu_idle
<idle>-0 [000] 49.309309: need_resched <-mwait_idle
<idle>-0 [000] 49.309310: test_ti_thread_flag <-need_resched
<idle>-0 [000] 49.309312: trace_power_start.constprop.13 <-mwait_idle
<idle>-0 [000] 49.309313: trace_cpu_idle <-mwait_idle
<idle>-0 [000] 49.309315: need_resched <-mwait_idle
Requested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2011-11-17 18:34:33 +04:00
2012-11-14 00:18:22 +04:00
if ( iter - > iter_flags & TRACE_FILE_TIME_IN_NS ) {
t = ns2usecs ( iter - > ts ) ;
usec_rem = do_div ( t , USEC_PER_SEC ) ;
secs = ( unsigned long ) t ;
2014-11-12 18:29:54 +03:00
trace_seq_printf ( s , " %5lu.%06lu: " , secs , usec_rem ) ;
2012-11-14 00:18:22 +04:00
} else
2014-11-12 18:29:54 +03:00
trace_seq_printf ( s , " %12llu: " , iter - > ts ) ;
return ! trace_seq_has_overflowed ( s ) ;
2009-02-03 01:29:21 +03:00
}
int trace_print_lat_context ( struct trace_iterator * iter )
{
u64 next_ts ;
2012-04-19 18:31:47 +04:00
/* trace_find_next_entry will reset ent_size */
int ent_size = iter - > ent_size ;
2009-02-03 01:29:21 +03:00
struct trace_seq * s = & iter - > seq ;
struct trace_entry * entry = iter - > ent ,
* next_entry = trace_find_next_entry ( iter , NULL ,
& next_ts ) ;
unsigned long verbose = ( trace_flags & TRACE_ITER_VERBOSE ) ;
2012-04-19 18:31:47 +04:00
/* Restore the original ent_size */
iter - > ent_size = ent_size ;
2009-02-03 01:29:21 +03:00
if ( ! next_entry )
next_ts = iter - > ts ;
if ( verbose ) {
2009-03-17 02:20:15 +03:00
char comm [ TASK_COMM_LEN ] ;
trace_find_cmdline ( entry - > pid , comm ) ;
2014-11-12 18:29:54 +03:00
trace_seq_printf (
s , " %16s %5d %3d %d %08x %08lx " ,
comm , entry - > pid , iter - > cpu , entry - > flags ,
entry - > preempt_count , iter - > idx ) ;
2009-02-03 01:29:21 +03:00
} else {
2014-11-12 18:29:54 +03:00
lat_print_generic ( s , entry , iter - > cpu ) ;
2009-02-03 01:29:21 +03:00
}
2014-11-12 18:29:54 +03:00
lat_print_timestamp ( iter , next_ts ) ;
2012-11-14 00:18:22 +04:00
2014-11-12 18:29:54 +03:00
return ! trace_seq_has_overflowed ( s ) ;
2009-02-03 01:29:21 +03:00
}
2008-12-24 07:24:13 +03:00
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

/*
 * task_state_char - map a task state bitmask to its display character
 * @state: the task state bits (0 means running)
 *
 * Uses the lowest set bit to index TASK_STATE_TO_CHAR_STR; returns '?'
 * for states beyond the known table.
 */
static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	/* -1 excludes the terminating NUL from the valid range */
	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
2008-12-24 07:24:12 +03:00
/**
* ftrace_find_event - find a registered event
* @ type : the type of event to look for
*
* Returns an event of type @ type otherwise NULL
2009-05-18 15:35:34 +04:00
* Called with trace_event_read_lock ( ) held .
2008-12-24 07:24:12 +03:00
*/
struct trace_event * ftrace_find_event ( int type )
{
struct trace_event * event ;
unsigned key ;
key = type & ( EVENT_HASHSIZE - 1 ) ;
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for each entry iterators were conceived
list_for_each_entry(pos, head, member)
The hlist ones were greedy and wanted an extra parameter:
hlist_for_each_entry(tpos, pos, head, member)
Why did they need an extra pos parameter? I'm not quite sure. Not only
they don't really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.
Besides the semantic patch, there was some manual work required:
- Fix up the actual hlist iterators in linux/list.h
- Fix up the declaration of other iterators based on the hlist ones.
- A very small amount of places were using the 'node' parameter, this
was modified to use 'obj->member' instead.
- Coccinelle didn't handle the hlist_for_each_entry_safe iterator
properly, so those had to be fixed up manually.
The semantic patch which is mostly the work of Peter Senna Tschudin is here:
@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@
-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>
[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foudnation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-02-28 05:06:00 +04:00
hlist_for_each_entry ( event , & event_hash [ key ] , node ) {
2008-12-24 07:24:12 +03:00
if ( event - > type = = type )
return event ;
}
return NULL ;
}
2009-04-24 20:20:52 +04:00
static LIST_HEAD ( ftrace_event_list ) ;
static int trace_search_list ( struct list_head * * list )
{
struct trace_event * e ;
int last = __TRACE_LAST_TYPE ;
if ( list_empty ( & ftrace_event_list ) ) {
* list = & ftrace_event_list ;
return last + 1 ;
}
/*
* We used up all possible max events ,
* lets see if somebody freed one .
*/
list_for_each_entry ( e , & ftrace_event_list , list ) {
if ( e - > type ! = last + 1 )
break ;
last + + ;
}
/* Did we used up all 65 thousand events??? */
if ( ( last + 1 ) > FTRACE_MAX_EVENT )
return 0 ;
* list = & e - > list ;
return last + 1 ;
}
2009-05-18 15:35:34 +04:00
void trace_event_read_lock ( void )
{
2013-03-11 11:14:03 +04:00
down_read ( & trace_event_sem ) ;
2009-05-18 15:35:34 +04:00
}
void trace_event_read_unlock ( void )
{
2013-03-11 11:14:03 +04:00
up_read ( & trace_event_sem ) ;
2009-05-18 15:35:34 +04:00
}
2008-12-24 07:24:12 +03:00
/**
2015-05-05 16:39:12 +03:00
* register_trace_event - register output for an event type
2008-12-24 07:24:12 +03:00
* @ event : the event type to register
*
* Event types are stored in a hash and this hash is used to
* find a way to print an event . If the @ event - > type is set
* then it will use that type , otherwise it will assign a
* type to use .
*
* If you assign your own type , please make sure it is added
* to the trace_type enum in trace . h , to avoid collisions
* with the dynamic types .
*
* Returns the event type number or zero on error .
*/
2015-05-05 16:39:12 +03:00
int register_trace_event ( struct trace_event * event )
2008-12-24 07:24:12 +03:00
{
unsigned key ;
int ret = 0 ;
2013-03-11 11:14:03 +04:00
down_write ( & trace_event_sem ) ;
2008-12-24 07:24:12 +03:00
2009-04-24 20:20:52 +04:00
if ( WARN_ON ( ! event ) )
2009-03-19 22:26:14 +03:00
goto out ;
2010-04-23 02:46:14 +04:00
if ( WARN_ON ( ! event - > funcs ) )
goto out ;
2009-04-24 20:20:52 +04:00
INIT_LIST_HEAD ( & event - > list ) ;
if ( ! event - > type ) {
2009-05-06 14:15:45 +04:00
struct list_head * list = NULL ;
2009-04-24 20:20:52 +04:00
if ( next_event_type > FTRACE_MAX_EVENT ) {
event - > type = trace_search_list ( & list ) ;
if ( ! event - > type )
goto out ;
} else {
2014-11-24 03:34:19 +03:00
2009-04-24 20:20:52 +04:00
event - > type = next_event_type + + ;
list = & ftrace_event_list ;
}
if ( WARN_ON ( ftrace_find_event ( event - > type ) ) )
goto out ;
list_add_tail ( & event - > list , list ) ;
} else if ( event - > type > __TRACE_LAST_TYPE ) {
2008-12-24 07:24:12 +03:00
printk ( KERN_WARNING " Need to add type to trace.h \n " ) ;
WARN_ON ( 1 ) ;
goto out ;
2009-04-24 20:20:52 +04:00
} else {
/* Is this event already used */
if ( ftrace_find_event ( event - > type ) )
goto out ;
}
2008-12-24 07:24:12 +03:00
2010-04-23 02:46:14 +04:00
if ( event - > funcs - > trace = = NULL )
event - > funcs - > trace = trace_nop_print ;
if ( event - > funcs - > raw = = NULL )
event - > funcs - > raw = trace_nop_print ;
if ( event - > funcs - > hex = = NULL )
event - > funcs - > hex = trace_nop_print ;
if ( event - > funcs - > binary = = NULL )
event - > funcs - > binary = trace_nop_print ;
2009-02-05 01:16:39 +03:00
2008-12-24 07:24:12 +03:00
key = event - > type & ( EVENT_HASHSIZE - 1 ) ;
2009-05-18 15:35:34 +04:00
hlist_add_head ( & event - > node , & event_hash [ key ] ) ;
2008-12-24 07:24:12 +03:00
ret = event - > type ;
out :
2013-03-11 11:14:03 +04:00
up_write ( & trace_event_sem ) ;
2008-12-24 07:24:12 +03:00
return ret ;
}
2015-05-05 16:39:12 +03:00
EXPORT_SYMBOL_GPL ( register_trace_event ) ;
2008-12-24 07:24:12 +03:00
2009-06-10 01:29:07 +04:00
/*
2013-03-11 11:14:03 +04:00
* Used by module code with the trace_event_sem held for write .
2009-06-10 01:29:07 +04:00
*/
2015-05-05 16:39:12 +03:00
int __unregister_trace_event ( struct trace_event * event )
2009-06-10 01:29:07 +04:00
{
hlist_del ( & event - > node ) ;
list_del ( & event - > list ) ;
return 0 ;
}
2008-12-24 07:24:12 +03:00
/**
2015-05-05 16:39:12 +03:00
* unregister_trace_event - remove a no longer used event
2008-12-24 07:24:12 +03:00
* @ event : the event to remove
*/
2015-05-05 16:39:12 +03:00
int unregister_trace_event ( struct trace_event * event )
2008-12-24 07:24:12 +03:00
{
2013-03-11 11:14:03 +04:00
down_write ( & trace_event_sem ) ;
2015-05-05 16:39:12 +03:00
__unregister_trace_event ( event ) ;
2013-03-11 11:14:03 +04:00
up_write ( & trace_event_sem ) ;
2008-12-24 07:24:12 +03:00
return 0 ;
}
2015-05-05 16:39:12 +03:00
EXPORT_SYMBOL_GPL ( unregister_trace_event ) ;
2008-12-24 07:24:13 +03:00
/*
* Standard events
*/
2010-04-23 02:46:14 +04:00
enum print_line_t trace_nop_print ( struct trace_iterator * iter , int flags ,
struct trace_event * event )
2008-12-24 07:24:13 +03:00
{
2014-11-12 18:29:54 +03:00
trace_seq_printf ( & iter - > seq , " type: %d \n " , iter - > ent - > type ) ;
2011-03-25 14:05:18 +03:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( & iter - > seq ) ;
2008-12-24 07:24:13 +03:00
}
/* TRACE_FN */
2010-04-23 02:46:14 +04:00
static enum print_line_t trace_fn_trace ( struct trace_iterator * iter , int flags ,
struct trace_event * event )
2008-12-24 07:24:13 +03:00
{
struct ftrace_entry * field ;
2009-02-03 01:30:12 +03:00
struct trace_seq * s = & iter - > seq ;
2008-12-24 07:24:13 +03:00
2009-02-03 01:30:12 +03:00
trace_assign_type ( field , iter - > ent ) ;
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
seq_print_ip_sym ( s , field - > ip , flags ) ;
2008-12-24 07:24:13 +03:00
if ( ( flags & TRACE_ITER_PRINT_PARENT ) & & field - > parent_ip ) {
2014-11-12 18:29:54 +03:00
trace_seq_puts ( s , " <- " ) ;
seq_print_ip_sym ( s , field - > parent_ip , flags ) ;
2008-12-24 07:24:13 +03:00
}
2014-11-12 18:29:54 +03:00
trace_seq_putc ( s , ' \n ' ) ;
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( s ) ;
2008-12-24 07:24:13 +03:00
}
2010-04-23 02:46:14 +04:00
static enum print_line_t trace_fn_raw ( struct trace_iterator * iter , int flags ,
struct trace_event * event )
2008-12-24 07:24:13 +03:00
{
struct ftrace_entry * field ;
2009-02-03 01:30:12 +03:00
trace_assign_type ( field , iter - > ent ) ;
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
trace_seq_printf ( & iter - > seq , " %lx %lx \n " ,
field - > ip ,
field - > parent_ip ) ;
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( & iter - > seq ) ;
2008-12-24 07:24:13 +03:00
}
2010-04-23 02:46:14 +04:00
static enum print_line_t trace_fn_hex ( struct trace_iterator * iter , int flags ,
struct trace_event * event )
2008-12-24 07:24:13 +03:00
{
struct ftrace_entry * field ;
2009-02-03 01:30:12 +03:00
struct trace_seq * s = & iter - > seq ;
2008-12-24 07:24:13 +03:00
2009-02-03 01:30:12 +03:00
trace_assign_type ( field , iter - > ent ) ;
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
SEQ_PUT_HEX_FIELD ( s , field - > ip ) ;
SEQ_PUT_HEX_FIELD ( s , field - > parent_ip ) ;
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( s ) ;
2008-12-24 07:24:13 +03:00
}
2010-04-23 02:46:14 +04:00
static enum print_line_t trace_fn_bin ( struct trace_iterator * iter , int flags ,
struct trace_event * event )
2008-12-24 07:24:13 +03:00
{
struct ftrace_entry * field ;
2009-02-03 01:30:12 +03:00
struct trace_seq * s = & iter - > seq ;
2008-12-24 07:24:13 +03:00
2009-02-03 01:30:12 +03:00
trace_assign_type ( field , iter - > ent ) ;
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
SEQ_PUT_FIELD ( s , field - > ip ) ;
SEQ_PUT_FIELD ( s , field - > parent_ip ) ;
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( s ) ;
2008-12-24 07:24:13 +03:00
}
2010-04-23 02:46:14 +04:00
static struct trace_event_functions trace_fn_funcs = {
2008-12-24 07:24:13 +03:00
. trace = trace_fn_trace ,
. raw = trace_fn_raw ,
. hex = trace_fn_hex ,
. binary = trace_fn_bin ,
} ;
2010-04-23 02:46:14 +04:00
static struct trace_event trace_fn_event = {
. type = TRACE_FN ,
. funcs = & trace_fn_funcs ,
} ;
2008-12-24 07:24:13 +03:00
/* TRACE_CTX an TRACE_WAKE */
2009-02-04 03:05:50 +03:00
static enum print_line_t trace_ctxwake_print ( struct trace_iterator * iter ,
char * delim )
2008-12-24 07:24:13 +03:00
{
struct ctx_switch_entry * field ;
2009-03-17 02:20:15 +03:00
char comm [ TASK_COMM_LEN ] ;
2008-12-24 07:24:13 +03:00
int S , T ;
2009-03-17 02:20:15 +03:00
2009-02-03 01:30:12 +03:00
trace_assign_type ( field , iter - > ent ) ;
2008-12-24 07:24:13 +03:00
T = task_state_char ( field - > next_state ) ;
S = task_state_char ( field - > prev_state ) ;
2009-03-17 02:20:15 +03:00
trace_find_cmdline ( field - > next_pid , comm ) ;
2014-11-12 18:29:54 +03:00
trace_seq_printf ( & iter - > seq ,
" %5d:%3d:%c %s [%03d] %5d:%3d:%c %s \n " ,
field - > prev_pid ,
field - > prev_prio ,
S , delim ,
field - > next_cpu ,
field - > next_pid ,
field - > next_prio ,
T , comm ) ;
return trace_handle_return ( & iter - > seq ) ;
2008-12-24 07:24:13 +03:00
}
2010-04-23 02:46:14 +04:00
static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, "  +");
}
2009-02-03 01:30:12 +03:00
static int trace_ctxwake_raw ( struct trace_iterator * iter , char S )
2008-12-24 07:24:13 +03:00
{
struct ctx_switch_entry * field ;
int T ;
2009-02-03 01:30:12 +03:00
trace_assign_type ( field , iter - > ent ) ;
2008-12-24 07:24:13 +03:00
if ( ! S )
2009-10-01 08:33:28 +04:00
S = task_state_char ( field - > prev_state ) ;
2008-12-24 07:24:13 +03:00
T = task_state_char ( field - > next_state ) ;
2014-11-12 18:29:54 +03:00
trace_seq_printf ( & iter - > seq , " %d %d %c %d %d %d %c \n " ,
field - > prev_pid ,
field - > prev_prio ,
S ,
field - > next_cpu ,
field - > next_pid ,
field - > next_prio ,
T ) ;
return trace_handle_return ( & iter - > seq ) ;
2008-12-24 07:24:13 +03:00
}
2010-04-23 02:46:14 +04:00
static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}
2009-02-03 01:30:12 +03:00
static int trace_ctxwake_hex ( struct trace_iterator * iter , char S )
2008-12-24 07:24:13 +03:00
{
struct ctx_switch_entry * field ;
2009-02-03 01:30:12 +03:00
struct trace_seq * s = & iter - > seq ;
2008-12-24 07:24:13 +03:00
int T ;
2009-02-03 01:30:12 +03:00
trace_assign_type ( field , iter - > ent ) ;
2008-12-24 07:24:13 +03:00
if ( ! S )
2009-10-01 08:33:28 +04:00
S = task_state_char ( field - > prev_state ) ;
2008-12-24 07:24:13 +03:00
T = task_state_char ( field - > next_state ) ;
2014-11-12 18:29:54 +03:00
SEQ_PUT_HEX_FIELD ( s , field - > prev_pid ) ;
SEQ_PUT_HEX_FIELD ( s , field - > prev_prio ) ;
SEQ_PUT_HEX_FIELD ( s , S ) ;
SEQ_PUT_HEX_FIELD ( s , field - > next_cpu ) ;
SEQ_PUT_HEX_FIELD ( s , field - > next_pid ) ;
SEQ_PUT_HEX_FIELD ( s , field - > next_prio ) ;
SEQ_PUT_HEX_FIELD ( s , T ) ;
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( s ) ;
2008-12-24 07:24:13 +03:00
}
2010-04-23 02:46:14 +04:00
static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}
2009-02-04 03:05:50 +03:00
static enum print_line_t trace_ctxwake_bin ( struct trace_iterator * iter ,
2010-04-23 02:46:14 +04:00
int flags , struct trace_event * event )
2008-12-24 07:24:13 +03:00
{
struct ctx_switch_entry * field ;
2009-02-03 01:30:12 +03:00
struct trace_seq * s = & iter - > seq ;
2008-12-24 07:24:13 +03:00
2009-02-03 01:30:12 +03:00
trace_assign_type ( field , iter - > ent ) ;
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
SEQ_PUT_FIELD ( s , field - > prev_pid ) ;
SEQ_PUT_FIELD ( s , field - > prev_prio ) ;
SEQ_PUT_FIELD ( s , field - > prev_state ) ;
SEQ_PUT_FIELD ( s , field - > next_cpu ) ;
SEQ_PUT_FIELD ( s , field - > next_pid ) ;
SEQ_PUT_FIELD ( s , field - > next_prio ) ;
SEQ_PUT_FIELD ( s , field - > next_state ) ;
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( s ) ;
2008-12-24 07:24:13 +03:00
}
2010-04-23 02:46:14 +04:00
static struct trace_event_functions trace_ctx_funcs = {
2008-12-24 07:24:13 +03:00
. trace = trace_ctx_print ,
. raw = trace_ctx_raw ,
. hex = trace_ctx_hex ,
. binary = trace_ctxwake_bin ,
} ;
2010-04-23 02:46:14 +04:00
static struct trace_event trace_ctx_event = {
. type = TRACE_CTX ,
. funcs = & trace_ctx_funcs ,
} ;
static struct trace_event_functions trace_wake_funcs = {
2008-12-24 07:24:13 +03:00
. trace = trace_wake_print ,
. raw = trace_wake_raw ,
. hex = trace_wake_hex ,
. binary = trace_ctxwake_bin ,
} ;
2010-04-23 02:46:14 +04:00
static struct trace_event trace_wake_event = {
. type = TRACE_WAKE ,
. funcs = & trace_wake_funcs ,
} ;
2008-12-24 07:24:13 +03:00
/* TRACE_STACK */
2009-02-04 03:05:50 +03:00
static enum print_line_t trace_stack_print ( struct trace_iterator * iter ,
2010-04-23 02:46:14 +04:00
int flags , struct trace_event * event )
2008-12-24 07:24:13 +03:00
{
struct stack_entry * field ;
2009-02-03 01:30:12 +03:00
struct trace_seq * s = & iter - > seq ;
2011-07-15 00:36:53 +04:00
unsigned long * p ;
unsigned long * end ;
2008-12-24 07:24:13 +03:00
2009-02-03 01:30:12 +03:00
trace_assign_type ( field , iter - > ent ) ;
2011-07-15 00:36:53 +04:00
end = ( unsigned long * ) ( ( long ) iter - > ent + iter - > ent_size ) ;
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
trace_seq_puts ( s , " <stack trace> \n " ) ;
2011-07-15 00:36:53 +04:00
for ( p = field - > caller ; p & & * p ! = ULONG_MAX & & p < end ; p + + ) {
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
if ( trace_seq_has_overflowed ( s ) )
break ;
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
trace_seq_puts ( s , " => " ) ;
seq_print_ip_sym ( s , * p , flags ) ;
trace_seq_putc ( s , ' \n ' ) ;
}
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( s ) ;
2008-12-24 07:24:13 +03:00
}
2010-04-23 02:46:14 +04:00
static struct trace_event_functions trace_stack_funcs = {
2008-12-24 07:24:13 +03:00
. trace = trace_stack_print ,
} ;
2010-04-23 02:46:14 +04:00
static struct trace_event trace_stack_event = {
. type = TRACE_STACK ,
. funcs = & trace_stack_funcs ,
} ;
2008-12-24 07:24:13 +03:00
/* TRACE_USER_STACK */
2009-02-04 03:05:50 +03:00
static enum print_line_t trace_user_stack_print ( struct trace_iterator * iter ,
2010-04-23 02:46:14 +04:00
int flags , struct trace_event * event )
2008-12-24 07:24:13 +03:00
{
struct userstack_entry * field ;
2009-02-03 01:30:12 +03:00
struct trace_seq * s = & iter - > seq ;
2008-12-24 07:24:13 +03:00
2009-02-03 01:30:12 +03:00
trace_assign_type ( field , iter - > ent ) ;
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
trace_seq_puts ( s , " <user stack trace> \n " ) ;
seq_print_userip_objs ( field , s , flags ) ;
2008-12-24 07:24:13 +03:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( s ) ;
2008-12-24 07:24:13 +03:00
}
2010-04-23 02:46:14 +04:00
static struct trace_event_functions trace_user_stack_funcs = {
2008-12-24 07:24:13 +03:00
. trace = trace_user_stack_print ,
} ;
2010-04-23 02:46:14 +04:00
static struct trace_event trace_user_stack_event = {
. type = TRACE_USER_STACK ,
. funcs = & trace_user_stack_funcs ,
} ;
2013-03-09 06:02:34 +04:00
/* TRACE_BPUTS */
static enum print_line_t
trace_bputs_print ( struct trace_iterator * iter , int flags ,
struct trace_event * event )
{
struct trace_entry * entry = iter - > ent ;
struct trace_seq * s = & iter - > seq ;
struct bputs_entry * field ;
trace_assign_type ( field , entry ) ;
2014-11-12 18:29:54 +03:00
seq_print_ip_sym ( s , field - > ip , flags ) ;
trace_seq_puts ( s , " : " ) ;
trace_seq_puts ( s , field - > str ) ;
2013-03-09 06:02:34 +04:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( s ) ;
2013-03-09 06:02:34 +04:00
}
static enum print_line_t
trace_bputs_raw ( struct trace_iterator * iter , int flags ,
struct trace_event * event )
{
struct bputs_entry * field ;
struct trace_seq * s = & iter - > seq ;
trace_assign_type ( field , iter - > ent ) ;
2014-11-12 18:29:54 +03:00
trace_seq_printf ( s , " : %lx : " , field - > ip ) ;
trace_seq_puts ( s , field - > str ) ;
2013-03-09 06:02:34 +04:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( s ) ;
2013-03-09 06:02:34 +04:00
}
static struct trace_event_functions trace_bputs_funcs = {
	.trace		= trace_bputs_print,
	.raw		= trace_bputs_raw,
};

static struct trace_event trace_bputs_event = {
	.type		= TRACE_BPUTS,
	.funcs		= &trace_bputs_funcs,
};
2009-03-12 20:24:49 +03:00
/* TRACE_BPRINT */
2009-03-06 19:21:47 +03:00
static enum print_line_t
2010-04-23 02:46:14 +04:00
trace_bprint_print ( struct trace_iterator * iter , int flags ,
struct trace_event * event )
2009-03-06 19:21:47 +03:00
{
struct trace_entry * entry = iter - > ent ;
struct trace_seq * s = & iter - > seq ;
2009-03-12 20:24:49 +03:00
struct bprint_entry * field ;
2009-03-06 19:21:47 +03:00
trace_assign_type ( field , entry ) ;
2014-11-12 18:29:54 +03:00
seq_print_ip_sym ( s , field - > ip , flags ) ;
trace_seq_puts ( s , " : " ) ;
trace_seq_bprintf ( s , field - > fmt , field - > buf ) ;
2009-03-06 19:21:47 +03:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( s ) ;
2009-03-06 19:21:47 +03:00
}
2009-03-06 19:21:49 +03:00
2009-03-12 20:24:49 +03:00
static enum print_line_t
2010-04-23 02:46:14 +04:00
trace_bprint_raw ( struct trace_iterator * iter , int flags ,
struct trace_event * event )
2009-03-06 19:21:47 +03:00
{
2009-03-12 20:24:49 +03:00
struct bprint_entry * field ;
2009-03-06 19:21:47 +03:00
struct trace_seq * s = & iter - > seq ;
2009-03-06 19:21:49 +03:00
trace_assign_type ( field , iter - > ent ) ;
2009-03-06 19:21:47 +03:00
2014-11-12 18:29:54 +03:00
trace_seq_printf ( s , " : %lx : " , field - > ip ) ;
trace_seq_bprintf ( s , field - > fmt , field - > buf ) ;
2009-03-06 19:21:47 +03:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( s ) ;
2009-03-06 19:21:47 +03:00
}
2010-04-23 02:46:14 +04:00
static struct trace_event_functions trace_bprint_funcs = {
. trace = trace_bprint_print ,
. raw = trace_bprint_raw ,
} ;
2009-03-06 19:21:49 +03:00
2009-03-12 20:24:49 +03:00
static struct trace_event trace_bprint_event = {
. type = TRACE_BPRINT ,
2010-04-23 02:46:14 +04:00
. funcs = & trace_bprint_funcs ,
2009-03-12 20:24:49 +03:00
} ;
/* TRACE_PRINT */
static enum print_line_t trace_print_print ( struct trace_iterator * iter ,
2010-04-23 02:46:14 +04:00
int flags , struct trace_event * event )
2009-03-12 20:24:49 +03:00
{
struct print_entry * field ;
struct trace_seq * s = & iter - > seq ;
trace_assign_type ( field , iter - > ent ) ;
2014-11-12 18:29:54 +03:00
seq_print_ip_sym ( s , field - > ip , flags ) ;
trace_seq_printf ( s , " : %s " , field - > buf ) ;
2009-03-12 20:24:49 +03:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( s ) ;
2009-03-12 20:24:49 +03:00
}
2010-04-23 02:46:14 +04:00
static enum print_line_t trace_print_raw ( struct trace_iterator * iter , int flags ,
struct trace_event * event )
2009-03-12 20:24:49 +03:00
{
struct print_entry * field ;
trace_assign_type ( field , iter - > ent ) ;
2014-11-12 18:29:54 +03:00
trace_seq_printf ( & iter - > seq , " # %lx %s " , field - > ip , field - > buf ) ;
2009-03-12 20:24:49 +03:00
2014-11-12 18:29:54 +03:00
return trace_handle_return ( & iter - > seq ) ;
2009-03-12 20:24:49 +03:00
}
2010-04-23 02:46:14 +04:00
static struct trace_event_functions trace_print_funcs = {
2009-03-06 19:21:49 +03:00
. trace = trace_print_print ,
. raw = trace_print_raw ,
2009-03-06 19:21:47 +03:00
} ;
2010-04-23 02:46:14 +04:00
static struct trace_event trace_print_event = {
. type = TRACE_PRINT ,
. funcs = & trace_print_funcs ,
} ;
2009-03-12 20:24:49 +03:00
2008-12-24 07:24:13 +03:00
static struct trace_event * events [ ] __initdata = {
& trace_fn_event ,
& trace_ctx_event ,
& trace_wake_event ,
& trace_stack_event ,
& trace_user_stack_event ,
2013-03-09 06:02:34 +04:00
& trace_bputs_event ,
2009-03-12 20:24:49 +03:00
& trace_bprint_event ,
2008-12-24 07:24:13 +03:00
& trace_print_event ,
NULL
} ;
__init static int init_events ( void )
{
struct trace_event * event ;
int i , ret ;
for ( i = 0 ; events [ i ] ; i + + ) {
event = events [ i ] ;
2015-05-05 16:39:12 +03:00
ret = register_trace_event ( event ) ;
2008-12-24 07:24:13 +03:00
if ( ! ret ) {
printk ( KERN_WARNING " event %d failed to register \n " ,
event - > type ) ;
WARN_ON_ONCE ( 1 ) ;
}
}
return 0 ;
}
2012-06-01 05:40:05 +04:00
early_initcall ( init_events ) ;