// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 */
#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
#include <asm/inst.h>
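
/*
 * Offsets (in units of kprobe_opcode_t) of the patch sites within the
 * detour buffer, measured from the start of the assembly template.
 */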
#define TMPL_CALL_HDLR_IDX	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX		(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX		(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX		(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX		(optprobe_template_end - optprobe_template_entry)
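
/*
 * Detour buffers are allocated from a single statically reserved page
 * (optinsn_slot); insn_page_in_use tracks whether that page has already
 * been handed out to the kprobe insn-slot allocator.
 */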
static bool insn_page_in_use;

void *alloc_optinsn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

void free_optinsn_page(void *page)
{
	insn_page_in_use = false;
}

/*
 * Check if we can optimize this probe. Returns NIP post-emulation if this can
 * be optimized and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;
	unsigned long addr = (unsigned long)p->addr;

	/*
	 * A kprobe placed on the kretprobe trampoline during boot sits on
	 * a 'nop' instruction, which can always be emulated, so further
	 * checks can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
		return addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, but not
	 * module addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr(addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;

	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we can't predict the nip beforehand with dummy
	 * pt_regs and cannot ensure that the return branch from the detour
	 * buffer falls within branch range (i.e. 32MB). A branch back from
	 * the trampoline is set up in the detour buffer to the nip returned
	 * by analyse_instr() here.
	 *
	 * Ensure that the instruction is not a conditional branch, and
	 * that it can be emulated.
	 */
	if (!is_conditional_branch(ppc_inst_read(p->ainsn.insn)) &&
	    analyse_instr(&op, &regs, ppc_inst_read(p->ainsn.insn)) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}

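/*
 * Pre-handler entry point reached from the detour buffer: the trampoline
 * saves a pt_regs frame and branches here instead of taking a trap.
 */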
static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs_set_return_ip(regs, (unsigned long)op->kp.addr);
		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}

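/*
 * Generate instructions to load the provided 32-bit immediate value
 * into register 'reg' (a lis/ori pair) and patch them at 'addr'.
 */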
static void patch_imm32_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
	patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HI(val))));
	patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}

/*
 * Generate instructions to load provided immediate 64-bit value
 * to register 'reg' and patch these instructions at 'addr'.
 */
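/*
 * For example, with the (hypothetical) value 0x123456789abcdef0 and reg = 4,
 * the patched sequence is roughly:
 *
 *	lis   r4, 0x1234	(PPC_HIGHEST: bits 48-63)
 *	ori   r4, r4, 0x5678	(PPC_HIGHER:  bits 32-47)
 *	sldi  r4, r4, 32
 *	oris  r4, r4, 0x9abc	(PPC_HI:      bits 16-31)
 *	ori   r4, r4, 0xdef0	(PPC_LO:      bits  0-15)
 */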
static void patch_imm64_load_insns(unsigned long long val, int reg, kprobe_opcode_t *addr)
{
	patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HIGHEST(val))));
	patch_instruction(addr++, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_HIGHER(val))));
	patch_instruction(addr++, ppc_inst(PPC_RAW_SLDI(reg, reg, 32)));
	patch_instruction(addr++, ppc_inst(PPC_RAW_ORIS(reg, reg, PPC_HI(val))));
	patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}

static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
	if (IS_ENABLED(CONFIG_PPC64))
		patch_imm64_load_insns(val, reg, addr);
	else
		patch_imm32_load_insns(val, reg, addr);
}

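/*
 * Build the detour buffer for probe 'p': copy the assembly template into an
 * insn slot, patch in the per-probe operands and branches, then record the
 * buffer in op->optinsn.insn.
 */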
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	ppc_inst_t branch_op_callback, branch_emulate_step, temp;
	unsigned long op_callback_addr, emulate_step_addr;
	kprobe_opcode_t *buff;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses the 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively nearby, to permit use of
	 * the branch instruction on powerpc, because the address is specified
	 * in an immediate field in the instruction opcode itself, i.e. 24 bits
	 * in the opcode specify the address. Therefore the address should
	 * be within 32MB on either side of the current instruction.
	 */
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) - nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Setup template */
	/* We can optimize this via patch_instruction_window later */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction(buff + i, ppc_inst(*(optprobe_template_entry + i)));
		if (rc < 0)
			goto error;
	}

	/*
	 * Fixup the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

	rc = create_branch(&branch_op_callback, buff + TMPL_CALL_HDLR_IDX,
			   op_callback_addr, BRANCH_SET_LINK);

	rc |= create_branch(&branch_emulate_step, buff + TMPL_EMULATE_IDX,
			    emulate_step_addr, BRANCH_SET_LINK);

	if (rc)
		goto error;

	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);

	/*
	 * 3. load the instruction to be emulated into the relevant register, and
	 */
	temp = ppc_inst_read(p->ainsn.insn);
	patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from trampoline
	 */
	patch_branch(buff + TMPL_RET_IDX, nip, 0);

	flush_icache_range((unsigned long)buff, (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;
	return 0;

error:
	free_optinsn_slot(buff, 0);
	return -ERANGE;
}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, an optprobe always replaces exactly one instruction (4 bytes
 * long and 4-byte aligned), so it is impossible to encounter another kprobe
 * in this address range. Always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

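/*
 * Replace the probed instruction at each probe point with a branch into the
 * per-probe detour buffer, after saving the original instruction(s).
 */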
void arch_optimize_kprobes(struct list_head *oplist)
{
	ppc_inst_t instr;
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Backup instructions which will be replaced
		 * by jump address
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE);
		create_branch(&instr, op->kp.addr, (unsigned long)op->optinsn.insn, 0);
		patch_instruction(op->kp.addr, instr);
		list_del_init(&op->list);
	}
}

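/*
 * Unoptimizing re-arms the regular kprobe: arch_arm_kprobe() puts the
 * breakpoint back at the probe point, replacing the branch to the detour
 * buffer.
 */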
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

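/* Return whether 'addr' falls within the instruction replaced by this optprobe. */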
int arch_within_optimized_kprobe(struct optimized_kprobe *op, kprobe_opcode_t *addr)
{
	return (op->kp.addr <= addr &&
		op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr);
}