/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}
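
/*
 * Walk one CPU's ring buffer, consuming every event and checking that
 * each entry has a type the selftests recognize.  On the first unknown
 * entry the buffer is treated as corrupted: tracing is disabled and -1
 * is returned.
 */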
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
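
/*
 * Most of the tracer selftests below follow the same pattern: start the
 * tracer with tracer_init(), generate some activity, stop tracing,
 * validate the buffer(s) with trace_test_buffer(), then reset the
 * tracer and restart tracing.
 */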
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)
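
/*
 * Two levels of stringification so that DYN_FTRACE_TEST_NAME is macro
 * expanded to the real function name before being turned into a string
 * for ftrace_set_filter().
 */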

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter so gcc cannot optimize the call away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}
	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Pretty much the same as the function tracer test, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we now have an RT prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
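
/*
 * Selftest for the wakeup tracer: create an RT kthread, let it go to
 * sleep, wake it from here and check that the resulting wakeup latency
 * shows up in both the live buffer and the max-latency snapshot buffer.
 */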
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */
	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return 0;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */