/*
 * Code for tracing calls in Linux kernel.
 * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
 *
 * based on code for x86 which is:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * future possible enhancements:
 *    - add CONFIG_DYNAMIC_FTRACE
 *    - add CONFIG_STACK_TRACER
 */
#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/assembly.h>
#include <asm/sections.h>
#include <asm/ftrace.h>


#define __hot __attribute__ ((__section__ (".text.hot")))

# ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Hook the return address and push it in the stack of return addrs
* in current thread info .
*/
2016-04-29 22:07:31 +02:00
static void __hot prepare_ftrace_return ( unsigned long * parent ,
unsigned long self_addr )
2009-02-09 00:43:36 +01:00
{
unsigned long old ;
struct ftrace_graph_ent trace ;
2016-04-13 22:27:22 +02:00
extern int parisc_return_to_handler ;
2009-02-09 00:43:36 +01:00
2014-06-25 10:17:48 -04:00
if ( unlikely ( ftrace_graph_is_dead ( ) ) )
return ;
2009-02-09 00:43:36 +01:00
if ( unlikely ( atomic_read ( & current - > tracing_graph_pause ) ) )
return ;
old = * parent ;
2016-04-13 22:27:22 +02:00
trace . func = self_addr ;
trace . depth = current - > curr_ret_stack + 1 ;
2009-02-09 00:43:36 +01:00
2016-04-13 22:27:22 +02:00
/* Only trace if the calling function expects to */
if ( ! ftrace_graph_entry ( & trace ) )
2009-02-09 00:43:36 +01:00
return ;
2016-04-13 22:27:22 +02:00
if ( ftrace_push_return_trace ( old , self_addr , & trace . depth ,
2016-08-19 06:52:57 -05:00
0 , NULL ) = = - EBUSY )
2016-04-13 22:27:22 +02:00
return ;
2009-02-09 00:43:36 +01:00
2016-04-13 22:27:22 +02:00
/* activate parisc_return_to_handler() as return point */
* parent = ( unsigned long ) & parisc_return_to_handler ;
2009-02-09 00:43:36 +01:00
}
# endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2016-04-29 22:07:31 +02:00
void notrace __hot ftrace_function_trampoline ( unsigned long parent ,
2009-02-09 00:43:36 +01:00
unsigned long self_addr ,
unsigned long org_sp_gr3 )
{
2016-04-13 22:27:22 +02:00
extern ftrace_func_t ftrace_trace_function ; /* depends on CONFIG_DYNAMIC_FTRACE */
extern int ftrace_graph_entry_stub ( struct ftrace_graph_ent * trace ) ;
2009-02-09 00:43:36 +01:00
if ( ftrace_trace_function ! = ftrace_stub ) {
2016-04-13 22:27:22 +02:00
/* struct ftrace_ops *op, struct pt_regs *regs); */
ftrace_trace_function ( parent , self_addr , NULL , NULL ) ;
2009-02-09 00:43:36 +01:00
return ;
}
2016-04-13 22:27:22 +02:00
2009-02-09 00:43:36 +01:00
# ifdef CONFIG_FUNCTION_GRAPH_TRACER
2016-04-13 22:27:22 +02:00
if ( ftrace_graph_return ! = ( trace_func_graph_ret_t ) ftrace_stub | |
ftrace_graph_entry ! = ftrace_graph_entry_stub ) {
2009-02-09 00:43:36 +01:00
unsigned long * parent_rp ;
/* calculate pointer to %rp in stack */
2016-04-13 22:27:22 +02:00
parent_rp = ( unsigned long * ) ( org_sp_gr3 - RP_OFFSET ) ;
2009-02-09 00:43:36 +01:00
/* sanity check: parent_rp should hold parent */
if ( * parent_rp ! = parent )
return ;
2016-04-13 22:27:22 +02:00
2009-02-09 00:43:36 +01:00
prepare_ftrace_return ( parent_rp , self_addr ) ;
return ;
}
# endif
}