/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009, 2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include "entry.h"

/*
 * The mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. Only the first instruction will be patched
 * by ftrace_make_call / ftrace_make_nop.
 * The enabled ftrace code block looks like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't directly jump to the ftrace caller, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 * The return point of the ftrace function has offset 24, so execution
 * continues behind the mcount block.
 * The disabled ftrace code block looks like this:
 * >	jg	.+24			# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 * In case we use gcc's hotpatch feature the original and also the disabled
 * function prologue contains only a single six byte instruction and looks
 * like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the code gets patched like above and afterwards looks
 * like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 */
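
/* Trampoline used to reach ftrace_caller from module code (see above). */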
unsigned long ftrace_plt;
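
/*
 * First instruction of the original, unpatched function prologue:
 * brcl 0,0 if the compiler's hotpatch feature is used, otherwise the
 * stg %r14,8(%r15) of the mcount block (see comment above).
 */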
static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
{
#ifdef CC_USING_HOTPATCH
	/* brcl 0,0 */
	insn->opc = 0xc004;
	insn->disp = 0;
#else
	/* stg r14,8(r15) */
	insn->opc = 0xe3e0;
	insn->disp = 0xf0080024;
#endif
}
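
/* Check whether a kprobe breakpoint has been placed on this ftrace location. */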
static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	if (insn->opc == BREAKPOINT_INSTRUCTION)
		return 1;
#endif
	return 0;
}
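
/*
 * A breakpoint with KPROBE_ON_FTRACE_NOP in the remaining four bytes of
 * the instruction tells the kprobes handler to execute a nop when it hits
 * this location (see ftrace_make_nop() below).
 */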
static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	insn->opc = BREAKPOINT_INSTRUCTION;
	insn->disp = KPROBE_ON_FTRACE_NOP;
#endif
}
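
/*
 * A breakpoint with KPROBE_ON_FTRACE_CALL in the remaining four bytes of
 * the instruction tells the kprobes handler to execute the brasl to
 * ftrace_caller instead (see ftrace_make_call() below).
 */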
static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	insn->opc = BREAKPOINT_INSTRUCTION;
	insn->disp = KPROBE_ON_FTRACE_CALL;
#endif
}
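
/*
 * All patched call sites branch to the same address (ftrace_caller, or the
 * ftrace_plt trampoline for modules), so switching a call site from one
 * ftrace caller to another never requires code changes on s390.
 */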
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}
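
/*
 * Disable tracing for a function: replace either the original compiler
 * generated prologue (initial conversion, addr == MCOUNT_ADDR) or the
 * enabled ftrace call at rec->ip with the disabled code block.
 */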
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	if (addr == MCOUNT_ADDR) {
		/* Initial code replacement */
		ftrace_generate_orig_insn(&orig);
		ftrace_generate_nop_insn(&new);
	} else if (is_kprobe_on_ftrace(&old)) {
		/*
		 * If we find a breakpoint instruction, a kprobe has been
		 * placed at the beginning of the function. We write the
		 * constant KPROBE_ON_FTRACE_NOP into the remaining four
		 * bytes of the original instruction so that the kprobes
		 * handler can execute a nop, if it reaches this breakpoint.
		 */
		ftrace_generate_kprobe_call_insn(&orig);
		ftrace_generate_kprobe_nop_insn(&new);
	} else {
		/* Replace ftrace call with a nop. */
		ftrace_generate_call_insn(&orig, rec->ip);
		ftrace_generate_nop_insn(&new);
	}
	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}
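
/*
 * Enable tracing for a function: replace the disabled code block (or its
 * kprobe variant) at rec->ip with a brasl to ftrace_caller, or to the
 * ftrace_plt trampoline for module code.
 */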
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	if (is_kprobe_on_ftrace(&old)) {
		/*
		 * If we find a breakpoint instruction, a kprobe has been
		 * placed at the beginning of the function. We write the
		 * constant KPROBE_ON_FTRACE_CALL into the remaining four
		 * bytes of the original instruction so that the kprobes
		 * handler can execute a brasl if it reaches this breakpoint.
		 */
		ftrace_generate_kprobe_nop_insn(&orig);
		ftrace_generate_kprobe_call_insn(&new);
	} else {
		/* Replace nop with an ftrace call. */
		ftrace_generate_nop_insn(&orig);
		ftrace_generate_call_insn(&new, rec->ip);
	}
	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}
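
/*
 * The patched call sites always branch to ftrace_caller (see above), and
 * ftrace_caller picks up the current tracer function at run time, so
 * nothing needs to be patched here when the tracer changes.
 */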
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
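
/*
 * Set up the ftrace_plt trampoline used by module code: basr loads the
 * address of the following instruction into %r1, lg then fetches the
 * 64-bit FTRACE_ADDR stored behind the code, and br %r1 branches to it.
 * Note that this clobbers %r1 (see comment at the top of this file).
 */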
static int __init ftrace_plt_init(void)
{
	unsigned int *ip;

	ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");
	ip = (unsigned int *) ftrace_plt;
	ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
	ip[1] = 0x100a0004;
	ip[2] = 0x07f10000;
	ip[3] = FTRACE_ADDR >> 32;
	ip[4] = FTRACE_ADDR & 0xffffffff;
	set_memory_ro(ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
	struct ftrace_graph_ent trace;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at the ftrace_graph_caller location. The instruction
 * there is a branch relative on condition. To enable the ftrace graph code
 * block, we simply patch the mask field of the instruction to zero and
 * turn the instruction into a nop.
 * To disable the ftrace graph code, the mask field is patched to all ones,
 * which turns the instruction into an unconditional branch.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	u8 op = 0x04; /* set mask field to zero */

	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	u8 op = 0xf4; /* set mask field to all ones */

	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */