// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
*/
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <asm/cacheflush.h>

#include "decode-insn.h"

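/*
 * Sentinel written to thread.fault_code before single-stepping an
 * instruction out of line; a real fault while stepping overwrites it,
 * which is how arch_uprobe_xol_was_trapped() detects the trap.
 */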
#define UPROBE_INV_FAULT_CODE	UINT_MAX

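/*
 * Copy the original instruction into the task's XOL slot page. A plain
 * memcpy() only reaches the D-side, so the range is synced afterwards to
 * keep stale I-cache lines from being executed out of the slot.
 */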
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
			   void *src, unsigned long len)
{
	void *xol_page_kaddr = kmap_atomic(page);
	void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);

	/* Initialize the slot */
	memcpy(dst, src, len);

	/* flush caches (dcache/icache) */
	sync_icache_aliases((unsigned long)dst, (unsigned long)dst + len);
	kunmap_atomic(xol_page_kaddr);
}

unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
			     unsigned long addr)
{
	probe_opcode_t insn;

	/* TODO: Currently we do not support AARCH32 instruction probing */
	if (mm->context.flags & MMCF_AARCH32)
		return -EOPNOTSUPP;
	else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
		return -EINVAL;

	insn = *(probe_opcode_t *)(&auprobe->insn[0]);

	switch (arm_probe_decode_insn(insn, &auprobe->api)) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		auprobe->simulate = true;
		break;

	default:
		break;
	}

	return 0;
}

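/*
 * Prepare to execute the probed instruction out of line: point the PC
 * at the XOL slot and enable hardware single-step for the task.
 */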
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/* Initialize with an invalid fault code to detect if the XOL insn trapped */
	current->thread.fault_code = UPROBE_INV_FAULT_CODE;

	/* Point the instruction pointer at the out-of-line (XOL) slot */
	instruction_pointer_set(regs, utask->xol_vaddr);

	user_enable_single_step(current);

	return 0;
}

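/*
 * The XOL instruction has executed: advance the PC past the probed
 * address (a fixed 4 bytes, as every A64 instruction is
 * AARCH64_INSN_SIZE wide) and disable single-stepping again.
 */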
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE);

	/* Point the instruction pointer at the insn following the breakpoint */
	instruction_pointer_set(regs, utask->vaddr + 4);

	user_disable_single_step(current);

	return 0;
}

bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	/*
	 * Between arch_uprobe_pre_xol and arch_uprobe_post_xol, if the XOL
	 * insn itself traps, detect that case via the invalid fault code
	 * set in arch_uprobe_pre_xol.
	 */
	if (t->thread.fault_code != UPROBE_INV_FAULT_CODE)
		return true;

	return false;
}

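/*
 * For instructions decoded as INSN_GOOD_NO_SLOT, skip the XOL step
 * entirely and let the decoder-supplied handler simulate the
 * instruction on the saved register state.
 */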
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	probe_opcode_t insn;
	unsigned long addr;

	if (!auprobe->simulate)
		return false;

	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
	addr = instruction_pointer(regs);

	if (auprobe->api.handler)
		auprobe->api.handler(insn, addr, regs);

	return true;
}

void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/*
	 * Task has received a fatal signal, so reset back to the probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);

	user_disable_single_step(current);
}

bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
			     struct pt_regs *regs)
{
	/*
	 * If a simple branch instruction (B) was used to call a retprobed
	 * assembly label, return true even when regs->sp and ret->stack are
	 * the same. This ensures that cleanup and reporting of the return
	 * instances corresponding to the callee label happen when
	 * handle_trampoline runs for the called function.
	 */
	if (ctx == RP_CHECK_CHAIN_CALL)
		return regs->sp <= ret->stack;
	else
		return regs->sp < ret->stack;
}

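/*
 * On arm64 the return address is carried in the link register rather
 * than on the stack, so hijacking it is a register swap through
 * procedure_link_pointer().
 */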
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
				  struct pt_regs *regs)
{
	unsigned long orig_ret_vaddr;

	orig_ret_vaddr = procedure_link_pointer(regs);
	/* Replace the return addr with trampoline addr */
	procedure_link_pointer_set(regs, trampoline_vaddr);

	return orig_ret_vaddr;
}

int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

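/*
 * Entered when a task hits the uprobe breakpoint, i.e. the BRK
 * instruction with immediate UPROBES_BRK_IMM that was patched over the
 * probed instruction.
 */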
static int uprobe_breakpoint_handler(struct pt_regs *regs,
				     unsigned long esr)
{
	if (uprobe_pre_sstep_notifier(regs))
		return DBG_HOOK_HANDLED;

	return DBG_HOOK_ERROR;
}

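/*
 * Entered from the single-step exception once the XOL instruction has
 * executed; the PC is expected to sit one instruction past the slot.
 */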
static int uprobe_single_step_handler(struct pt_regs *regs,
				      unsigned long esr)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON(utask && (instruction_pointer(regs) != utask->xol_vaddr + 4));

	if (uprobe_post_sstep_notifier(regs))
		return DBG_HOOK_HANDLED;

	return DBG_HOOK_ERROR;
}

/* uprobe breakpoint handler hook */
static struct break_hook uprobes_break_hook = {
	.imm = UPROBES_BRK_IMM,
	.fn = uprobe_breakpoint_handler,
};

/* uprobe single step handler hook */
static struct step_hook uprobes_step_hook = {
	.fn = uprobe_single_step_handler,
};

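/*
 * Register both hooks with the debug exception handling code at boot so
 * that user-space BRK and single-step exceptions reach the uprobe
 * handlers above.
 */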
static int __init arch_init_uprobes(void)
{
	register_user_break_hook(&uprobes_break_hook);
	register_user_step_hook(&uprobes_step_hook);

	return 0;
}
device_initcall(arch_init_uprobes);