/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct trace_event_call event_<call>;
 *
 * static void trace_event_raw_event_<call>(void *__data, proto)
 * {
 *	struct trace_event_file *trace_file = __data;
 *	struct trace_event_call *event_call = trace_file->event_call;
 *	struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
 *	unsigned long eflags = trace_file->flags;
 *	enum event_trigger_type __tt = ETT_NONE;
 *	struct ring_buffer_event *event;
 *	struct trace_event_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
 *		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
 *			event_triggers_call(trace_file, NULL);
 *		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
 *			return;
 *	}
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
 *		__tt = event_triggers_call(trace_file, entry);
 *
 *	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
 *		     &trace_file->flags))
 *		ring_buffer_discard_commit(buffer, event);
 *	else if (!filter_check_discard(trace_file, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 *	if (__tt)
 *		event_triggers_post_call(trace_file, __tt);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= trace_raw_output_<call>, <-- stage 2
 * };
 *
 * static char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct trace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= ftrace_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= trace_event_raw_event_##call,
 *	.reg			= trace_event_reg,
 * };
 *
 * static struct trace_event_call event_<call> = {
 *	.class			= event_class_<template>,
 *	{
 *		.tp			= &__tracepoint_<call>,
 *	},
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 *	.flags			= TRACE_EVENT_FL_TRACEPOINT,
 * };
 * // its only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct trace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */
#ifdef CONFIG_PERF_EVENTS

/*
 * When perf is enabled, forward-declare the perf probe for each event class
 * and hook it into the class via .perf_probe.  With perf disabled both
 * macros expand to nothing, so the class initializer stays valid.
 */
#define _TRACE_PERF_PROTO(call, proto)					\
	static notrace void						\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)						\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */
/*
 * Redefine the field macros for the "assign" stage: field declarations
 * expand to nothing (the struct was built in stage 1); only dynamic
 * arrays need work here, recording their offset/length word in the entry.
 */
#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_struct
#define __field_struct(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits)				\
	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))

#undef TP_fast_assign
#define TP_fast_assign(args...) args

/* In the ftrace probe the perf hint wrappers are pass-through. */
#undef __perf_addr
#define __perf_addr(a)	(a)

#undef __perf_count
#define __perf_count(c)	(c)

#undef __perf_task
#define __perf_task(t)	(t)
#undef DECLARE_EVENT_CLASS
/*
 * Generate the ftrace probe for an event class: reserve ring-buffer
 * space (trace_event_buffer_reserve also handles soft-disable and
 * filtering state), fill the entry via the tstruct/assign expansions,
 * then commit.  Bails out early when the event is soft-disabled.
 */
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
trace_event_raw_event_##call(void *__data, proto)			\
{									\
	struct trace_event_file *trace_file = __data;			\
	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
	struct trace_event_buffer fbuffer;				\
	struct trace_event_raw_##call *entry;				\
	int __data_size;						\
									\
	if (trace_trigger_soft_disabled(trace_file))			\
		return;							\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	entry = trace_event_buffer_reserve(&fbuffer, trace_file,	\
				 sizeof(*entry) + __data_size);		\
									\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	trace_event_buffer_commit(&fbuffer);				\
}
/*
 * The ftrace_test_probe is compiled out; it is only here as a build time
 * check to make sure that if the tracepoint handling changes, the ftrace
 * probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
/*
 * Compile-time type check only: verifies the probe's signature still
 * matches the tracepoint's callback type.  Never called at runtime.
 */
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(trace_event_raw_event_##template); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
 * Prepare for the print_fmt expansion: __entry becomes the literal
 * "REC" token (consumed by userspace format parsers), the accessor
 * helpers are undefined so their names survive into the format string,
 * and TP_printk stringifies the whole argument list.
 */
#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask
#undef __print_array

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)
#undef DECLARE_EVENT_CLASS
/*
 * Second expansion of DECLARE_EVENT_CLASS: emit the perf probe prototype,
 * the print format string, and the trace_event_class object that ties
 * together field definitions, the ftrace probe and registration hooks.
 */
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static char print_fmt_##call[] = print;					\
static struct trace_event_class __used __refdata event_class_##call = { \
	.system			= TRACE_SYSTEM_STRING,			\
	.define_fields		= ftrace_define_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= trace_event_raw_event_##call,		\
	.reg			= trace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};
#undef DEFINE_EVENT
/*
 * Emit the trace_event_call for one event, pointing at its template's
 * class, funcs and print format.  A pointer to it is placed in the
 * "_ftrace_events" linker section; the core walks that section as an
 * array at boot, which is only safe with pointers (see comment above).
 */
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct trace_event_call __used event_##call = {			\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,		\
	},								\
	.event.funcs		= &trace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct trace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#undef DEFINE_EVENT_PRINT
/*
 * Same as DEFINE_EVENT, but the event supplies its own print format and
 * output funcs (keyed by the event name, not the template).
 */
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static char print_fmt_##call[] = print;					\
									\
static struct trace_event_call __used event_##call = {			\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,		\
	},								\
	.event.funcs		= &trace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct trace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#undef TRACE_SYSTEM_VAR

#ifdef CONFIG_PERF_EVENTS

/*
 * Perf probe stage: dynamic-array accessors decode the packed
 * offset(low 16 bits)/length(high 16 bits) word stored in the entry,
 * and the __perf_* hints now capture into the probe's locals.
 */
#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
		((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __perf_addr
#define __perf_addr(a)	(__addr = (a))

#undef __perf_count
#define __perf_count(c)	(__count = (c))

#undef __perf_task
#define __perf_task(t)	(__task = (t))
#undef DECLARE_EVENT_CLASS
/*
 * Generate the perf probe for an event class: skip cheaply when no perf
 * event is attached on this CPU (and no task filter is in play), size
 * the record to a u64 boundary minus the trailing u32 perf adds, fill
 * it via the tstruct/assign expansions, and submit to the perf buffer.
 */
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct trace_event_call *event_call = __data;			\
	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
	struct trace_event_raw_##call *entry;				\
	struct pt_regs *__regs;						\
	u64 __addr = 0, __count = 1;					\
	struct task_struct *__task = NULL;				\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	if (__builtin_constant_p(!__task) && !__task &&			\
				hlist_empty(head))			\
		return;							\
									\
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	entry = perf_trace_buf_prepare(__entry_size,			\
			event_call->event.type, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	perf_fetch_caller_regs(__regs);					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
		__count, __regs, head, __task);				\
}
/*
 * This part is compiled out; it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
/*
 * Compile-time type check only: verifies the perf probe's signature
 * still matches the tracepoint's callback type.  Never called.
 */
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)				\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif /* CONFIG_PERF_EVENTS */