/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/ftrace.h>
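
/* 0x60000000 is "ori 0,0,0", the PowerPC nop used to patch out call sites */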
static unsigned int ftrace_nop = 0x60000000;

#ifdef CONFIG_PPC32
# define GET_ADDR(addr) addr
#else
/* PowerPC64's functions are data that points to the functions */
# define GET_ADDR(addr) *(unsigned long *)addr
#endif
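
/*
 * Offset from the call site at @ip to the target @addr, as used in the
 * branch instruction's displacement field.
 */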
static unsigned int notrace ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}
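
/* The instruction used to patch out a call site: a plain nop */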
notrace unsigned char *ftrace_nop_replace(void)
{
	return (char *)&ftrace_nop;
}
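
/* Build the "bl" instruction that calls @addr from the call site at @ip */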
notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static unsigned int op;

	/*
	 * It would be nice to just use create_function_call, but that will
	 * update the code itself.  Here we need to just return the
	 * instruction that is going to be modified, without modifying the
	 * code.
	 */
	addr = GET_ADDR(addr);

	/* Set to "bl addr" */
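	/*
	 * 0x48000001 is the "bl" opcode (branch, AA = 0, LK = 1); the mask
	 * keeps only the 24-bit, word-aligned LI displacement field.
	 */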
	op = 0x48000001 | (ftrace_calc_offset(ip, addr) & 0x03fffffc);

	/*
	 * No locking needed; this must be called via kstop_machine,
	 * which in essence is like running on a uniprocessor machine.
	 */
	return (unsigned char *)&op;
}
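
/* __ex_table entries are pointer sized; pick the directives to match */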
#ifdef CONFIG_PPC64
# define _ASM_ALIGN	" .align 3 "
# define _ASM_PTR	" .llong "
#else
# define _ASM_ALIGN	" .align 2 "
# define _ASM_PTR	" .long "
#endif
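
/*
 * Replace the instruction at @ip with @new_code, provided it still holds
 * @old_code.  Returns 0 on success, 1 if the access faulted, and 2 if the
 * instruction found was neither the old nor the new one.
 */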
notrace int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned replaced;
	unsigned old = *(unsigned *)old_code;
	unsigned new = *(unsigned *)new_code;
	int faulted = 0;

	/*
	 * Note: Due to modules and __init, code can disappear and change;
	 * we need to protect against faulting as well as the code changing.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine.
	 */
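	/*
	 * Load the word at ip, compare it against the expected old
	 * instruction and store the new one only on a match; a fault on
	 * the load is routed through the fixup section, which sets
	 * faulted to 1.
	 */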
	asm volatile (
		"1: lwz		%1, 0(%2)\n"
		"   cmpw	%1, %5\n"
		"   bne		2f\n"
		"   stwu	%3, 0(%2)\n"
		"2:\n"
		".section .fixup, \"ax\"\n"
		"3:	li %0, 1\n"
		"	b 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		_ASM_ALIGN "\n"
		_ASM_PTR "1b, 3b\n"
		".previous"
		: "=r"(faulted), "=r"(replaced)
		: "r"(ip), "r"(new),
		  "0"(faulted), "r"(old)
		: "memory");

	if (replaced != old && replaced != new)
		faulted = 2;

	if (!faulted)
		flush_icache_range(ip, ip + 8);

	return faulted;
}
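
/* Patch the call at ftrace_call to branch to the new tracer function @func */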
notrace int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}
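
/*
 * Patch mcount_call to call the function whose address is in *data; the
 * return value of ftrace_modify_code() is passed back through *data.
 */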
notrace int ftrace_mcount_set(unsigned long *data)
{
	unsigned long ip = (long)(&mcount_call);
	unsigned long *addr = data;
	unsigned char old[MCOUNT_INSN_SIZE], *new;

	/*
	 * Replace the mcount stub with a pointer to the
	 * ip recorder function.
	 */
	memcpy(old, &mcount_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, *addr);
	*addr = ftrace_modify_code(ip, old, new);

	return 0;
}
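
/*
 * Boot-time setup for dynamic ftrace: point mcount_call at the ip
 * recorder function whose address is passed in via @data.
 */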
int __init ftrace_dyn_arch_init(void *data)
{
	/* This is running in kstop_machine */

	ftrace_mcount_set(data);

	return 0;
}