/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for PPC64
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
		ret = -EINVAL;
	}

	/* insn must be on a special executable page on ppc64 */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret) {
		memcpy(p->ainsn.insn, p->addr,
				MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		p->opcode = *p->addr;
		/*
		 * POWER4 icaches are not coherent: another processor
		 * could fetch stale text from this instruction slot
		 * (e.g. after an earlier kprobe hit) before the copy
		 * above completes, and then execute an arbitrary
		 * instruction when this probe fires.  Flush the icache
		 * as soon as the instruction is copied, so the correct
		 * instruction is seen on all processors in the
		 * out-of-line single-stepping area.
		 */
		flush_icache_range((unsigned long)p->ainsn.insn,
			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
	}

	p->ainsn.boostable = 0;
	return ret;
}
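
/*
 * Example (illustrative sketch, not part of this file): how a module
 * could register a kprobe that goes through the arch_prepare_kprobe()
 * path above.  The probed symbol ("do_fork") and the handler names
 * are assumptions for illustration only.
 */
#if 0
static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	/* Runs before the probed instruction is single-stepped */
	printk("kprobe hit at nip=0x%lx\n", regs->nip);
	return 0;	/* 0: continue with normal single-step handling */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",	/* assumed target symbol */
	.pre_handler	= example_pre_handler,
};

static int __init example_kp_init(void)
{
	/*
	 * Fails with -EINVAL here if the address is unaligned or the
	 * target is an mtmsr[d]/rfi[d] instruction (see above).
	 */
	return register_kprobe(&example_kp);
}
#endif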

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, 0);
	mutex_unlock(&kprobe_mutex);
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->msr |= MSR_SE;

	/*
	 * On powerpc we should single step on the original
	 * instruction even if the probed insn is a trap
	 * variant, since register values in regs can decide
	 * whether the trap is taken or not.
	 */
	regs->nip = (unsigned long)p->ainsn.insn;
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_msr = regs->msr;
}

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->link;

	/* Replace the return addr with trampoline addr */
	regs->link = (unsigned long)kretprobe_trampoline;
}

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				regs->msr &= ~MSR_SE;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler() because
			 * another probe was hit while within the handler.
			 * Here we save the original kprobe variables and
			 * just single step on the instruction of the new
			 * probe without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_saved_msr = regs->msr;
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* If it's a trap variant, it is not ours */
				kprobe_opcode_t cur_insn = *addr;
				if (is_trap(cur_insn))
					goto no_kprobe;
				/*
				 * The breakpoint instruction was removed by
				 * another cpu right after we hit it, so no
				 * further handling of this interrupt is
				 * appropriate.
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction.  If the current instruction is a
			 * trap variant, it could belong to someone else.
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	if (p->ainsn.boostable >= 0) {
		unsigned int insn = *p->ainsn.insn;

		/* regs->nip is also adjusted if emulate_step returns 1 */
		ret = emulate_step(regs, insn);
		if (ret > 0) {
			/*
			 * Once this instruction has been boosted
			 * successfully, set the boostable flag
			 */
			if (unlikely(p->ainsn.boostable == 0))
				p->ainsn.boostable = 1;

			if (p->post_handler)
				p->post_handler(p, regs, 0);

			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		} else if (ret < 0) {
			/*
			 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
			 * So we should never get here, but it's still
			 * good to catch it, just in case.
			 */
			printk("Can't step on instruction %x\n", insn);
			BUG();
		} else if (ret == 0)
			/* This instruction can't be boosted */
			p->ainsn.boostable = -1;
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
			"kretprobe_trampoline:\n"
			"nop\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->nip = orig_ret_address;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
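
/*
 * Example (illustrative sketch, not part of this file): a kretprobe
 * whose instances flow through arch_prepare_kretprobe() and the
 * trampoline handler above.  Symbol and handler names are assumptions
 * for illustration only.
 */
#if 0
static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	/* On powerpc the return value is in GPR3 */
	printk("probed function returned %ld\n", regs->gpr[3]);
	return 0;
}

static struct kretprobe example_rp = {
	.handler	= example_ret_handler,
	.kp.symbol_name	= "do_fork",	/* assumed target symbol */
	.maxactive	= 20,	/* instances for concurrent/recursive calls */
};

static int __init example_rp_init(void)
{
	return register_kretprobe(&example_rp);
}
#endif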

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	regs->nip = (unsigned long)p->addr;
	ret = emulate_step(regs, insn);
	if (ret == 0)
		regs->nip = (unsigned long)p->addr + 4;
}

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore the previously saved kprobe state and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, msr
	 * will have SE set, in which case, continue the remaining
	 * processing of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SE)
		return 0;

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault.  We reset the current
		 * kprobe so that nip points back to the probe address,
		 * and allow the page fault handler to continue as for
		 * a normal page fault.
		 */
		regs->nip = (unsigned long)cur->addr;
		regs->msr &= ~MSR_SE;
		regs->msr |= kcb->kprobe_saved_msr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting; we can
		 * also use the npre/npostfault counts for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because an instruction in the pre/post
		 * handler caused a page fault.  This could happen if
		 * the handler tries to access user space, e.g. via
		 * copy_from_user() or get_user().  Let the
		 * user-specified handler try to fix it up first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->nip)) != NULL) {
			regs->nip = entry->fixup;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

#ifdef CONFIG_PPC64
unsigned long arch_deref_entry_point(void *entry)
{
	return (unsigned long)(((func_descr_t *)entry)->entry);
}
#endif

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->nip = arch_deref_entry_point(jp->entry);
#ifdef CONFIG_PPC64
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif

	return 1;
}

void __kprobes jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}

void __kprobes jprobe_return_end(void)
{
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * FIXME - we should ideally be validating that we got here 'cos
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs...
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	preempt_enable_no_resched();
	return 1;
}
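
/*
 * Example (illustrative sketch, not part of this file): a jprobe that
 * exercises setjmp_pre_handler()/longjmp_break_handler() above.  The
 * handler must mirror the probed function's signature (the do_fork
 * prototype shown is an assumption for kernels of this era) and must
 * end with jprobe_return().
 */
#if 0
static long example_jdo_fork(unsigned long clone_flags,
			     unsigned long stack_start,
			     struct pt_regs *regs,
			     unsigned long stack_size,
			     int __user *parent_tidptr,
			     int __user *child_tidptr)
{
	printk("do_fork: clone_flags=0x%lx\n", clone_flags);
	jprobe_return();	/* mandatory: never return normally */
	return 0;		/* never reached */
}

static struct jprobe example_jp = {
	.entry		= JPROBE_ENTRY(example_jdo_fork),
	.kp.symbol_name	= "do_fork",	/* assumed target symbol */
};

static int __init example_jp_init(void)
{
	return register_jprobe(&example_jp);
}
#endif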

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)&kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}