/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009, 2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
2009-06-12 10:26:46 +02:00
# include <linux/hardirq.h>
2009-06-12 10:26:44 +02:00
# include <linux/uaccess.h>
# include <linux/ftrace.h>
# include <linux/kernel.h>
# include <linux/types.h>
2011-01-05 12:48:11 +01:00
# include <linux/kprobes.h>
2009-06-12 10:26:47 +02:00
# include <trace/syscall.h>
2010-02-26 22:37:43 +01:00
# include <asm/asm-offsets.h>
2013-09-06 19:10:48 +02:00
# include "entry.h"
2009-06-12 10:26:44 +02:00
2014-09-03 13:26:23 +02:00
/* 24 byte template copied over a complete mcount block on first conversion */
void mcount_replace_code(void);
/* "Disabled" instruction sequence: jg over the ftrace code block */
void ftrace_disable_code(void);
/* Single lg instruction patched in at rec->ip to enable tracing */
void ftrace_enable_insn(void);
2009-06-12 10:26:44 +02:00
/*
 * The mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. The complete mcount block initially gets replaced
 * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop
 * only patch the jg/lg instruction within the block.
 * Note: we do not patch the first instruction to an unconditional branch,
 * since that would break kprobes/jprobes. It is easier to leave the larl
 * instruction in and only modify the second instruction.
 * The enabled ftrace code block looks like this:
 *	larl	%r0,.+24		# offset 0
 * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
 *	br	%r1			# offset 12
 *	brcl	0,0			# offset 14
 *	brc	0,0			# offset 20
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * The return point of the ftrace function has offset 24, so execution
 * continues behind the mcount block.
 * The disabled ftrace code block looks like this:
 *	larl	%r0,.+24		# offset 0
 * >	jg	.+18			# offset 6
 *	br	%r1			# offset 12
 *	brcl	0,0			# offset 14
 *	brc	0,0			# offset 20
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 */
2009-06-12 10:26:44 +02:00
/*
 * Patch templates (see layout description above). The labels are declared
 * as functions so probe_kernel_write can copy their bytes into kernel text.
 */
asm(
	"	.align	4\n"
	"mcount_replace_code:\n"
	"	larl	%r0,0f\n"		/* offset 0: return point -> r0 */
	"ftrace_disable_code:\n"
	"	jg	0f\n"			/* offset 6: skip ftrace block */
	"	br	%r1\n"			/* offset 12 */
	"	brcl	0,0\n"			/* offset 14: nop filler */
	"	brc	0,0\n"			/* offset 20: nop filler */
	"0:\n"
	"	.align	4\n"
	"ftrace_enable_insn:\n"
	/* lg %r1,__LC_FTRACE_FUNC: load current tracer address from lowcore */
	"	lg	%r1," __stringify(__LC_FTRACE_FUNC) "\n");
2009-06-12 10:26:44 +02:00
2014-09-03 13:26:23 +02:00
/* Size of the complete mcount block that gets replaced initially */
#define MCOUNT_BLOCK_SIZE	24
/* rec->ip points MCOUNT_INSN_OFFSET bytes into the block (the patched insn) */
#define MCOUNT_INSN_OFFSET	6
/* Size of the single lg instruction written by ftrace_make_call */
#define FTRACE_INSN_SIZE	6
2009-06-12 10:26:44 +02:00
2014-08-15 13:01:46 +02:00
int ftrace_modify_call ( struct dyn_ftrace * rec , unsigned long old_addr ,
unsigned long addr )
{
return 0 ;
}
2009-06-12 10:26:44 +02:00
/*
 * Disable the ftrace call site at rec->ip.
 *
 * Two cases:
 * - addr == MCOUNT_ADDR: first-time conversion of a compiler generated
 *   mcount call site. The whole MCOUNT_BLOCK_SIZE byte block, starting
 *   MCOUNT_INSN_OFFSET bytes before rec->ip, is overwritten with the
 *   mcount_replace_code template.
 * - otherwise: the site was converted before; only re-patch the
 *   instruction at rec->ip with the "jg" from ftrace_disable_code.
 *
 * Returns 0 on success, -EPERM if writing the kernel text failed.
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	/* Initial replacement of the whole mcount block */
	if (addr == MCOUNT_ADDR) {
		if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET,
				       mcount_replace_code,
				       MCOUNT_BLOCK_SIZE))
			return -EPERM;
		return 0;
	}
	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
			       MCOUNT_INSN_SIZE))
		return -EPERM;
	return 0;
}
int ftrace_make_call ( struct dyn_ftrace * rec , unsigned long addr )
{
2011-01-05 12:48:11 +01:00
if ( probe_kernel_write ( ( void * ) rec - > ip , ftrace_enable_insn ,
FTRACE_INSN_SIZE ) )
return - EPERM ;
return 0 ;
2009-06-12 10:26:44 +02:00
}
/*
 * Called by the ftrace core when the tracer function changes.
 * No text patching needed here: the enabled code block fetches the tracer
 * address from lowcore (__LC_FTRACE_FUNC) at call time, so it presumably
 * picks up the new function without re-patching call sites.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}
2014-02-24 19:59:59 +01:00
/* No architecture specific initialization needed for dynamic ftrace */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
2009-06-12 10:26:46 +02:00
# ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Hook the return address and push it in the stack of return addresses
* in current thread info .
*/
2011-01-05 12:48:11 +01:00
/*
 * Decide whether to divert the return of the traced function to
 * return_to_handler for the function graph tracer.
 *
 * @parent: original return address of the traced function
 * @ip:     address inside the mcount block of the traced function
 *
 * Returns the (possibly replaced) return address. Marked __kprobes so
 * this helper itself cannot be kprobed.
 */
unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
					      unsigned long ip)
{
	struct ftrace_graph_ent trace;

	/* Graph tracing paused for this task -> keep original parent */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	/*
	 * Strip the addressing-mode bits and step back MCOUNT_INSN_SIZE
	 * bytes — presumably to report the traced function's address
	 * rather than the point inside the mcount block.
	 */
	ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	/* Divert the return path into the graph tracer's exit handler */
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}
/*
 * Patch the kernel code at the ftrace_graph_caller location. The instruction
 * there is branch relative on condition. To enable the ftrace graph code
 * block, we simply patch the mask field of the instruction to zero and
 * turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
2014-08-15 12:33:46 +02:00
int ftrace_enable_ftrace_graph_caller ( void )
{
2014-10-08 10:03:08 +02:00
u8 op = 0x04 ; /* set mask field to zero */
2014-08-15 12:33:46 +02:00
2014-10-08 10:03:08 +02:00
return probe_kernel_write ( __va ( ftrace_graph_caller ) + 1 , & op , sizeof ( op ) ) ;
2014-08-15 12:33:46 +02:00
}
int ftrace_disable_ftrace_graph_caller ( void )
{
2014-10-08 10:03:08 +02:00
u8 op = 0xf4 ; /* set mask field to all ones */
2014-08-15 12:33:46 +02:00
2014-10-08 10:03:08 +02:00
return probe_kernel_write ( __va ( ftrace_graph_caller ) + 1 , & op , sizeof ( op ) ) ;
2014-08-15 12:33:46 +02:00
}
2009-06-12 10:26:46 +02:00
# endif /* CONFIG_FUNCTION_GRAPH_TRACER */