// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Kernel unwind state
 *
 * @common:      Common unwind state.
 * @task:        The task being unwound.
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
};

static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
}
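
/*
 * Illustrative sketch (hypothetical function, not part of this file): the
 * noinline requirement above exists so that __builtin_frame_address(1) and
 * __builtin_return_address(0) resolve relative to a real, non-inlined frame.
 * A caller would roughly look like:
 *
 *	static noinline void example_trace_here(void)
 *	{
 *		struct kunwind_state state;
 *
 *		kunwind_init_from_caller(&state);
 *	}
 *
 * After this, state.common.pc is the return address into
 * example_trace_here()'s caller and state.common.fp is that caller's frame
 * record, so the unwind begins at the caller. A complete walk also needs the
 * stack_info array set up, as kunwind_stack_walk() below does; in this file,
 * arch_stack_walk() plays the noinline-caller role.
 */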

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
}

/*
 * Where the PC has been replaced by an instrumentation trampoline (the ftrace
 * graph return handler or the kretprobe trampoline), recover the original
 * return address so the reported trace reflects the real call chain.
 */
static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;

		orig_pc = ftrace_graph_ret_addr(state->task, NULL,
						state->common.pc,
						(void *)state->common.fp);
		if (WARN_ON_ONCE(state->common.pc == orig_pc))
			return -EINVAL;
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;

		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	int err;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	err = unwind_next_frame_record(&state->common);
	if (err)
		return err;

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}

typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})
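
/*
 * For illustration: within kunwind_stack_walk() below, STACKINFO_CPU(irq)
 * expands (roughly) to the statement expression
 *
 *	({
 *		((task == current) && !preemptible())
 *			? stackinfo_get_irq()
 *			: stackinfo_get_unknown();
 *	})
 *
 * i.e. the IRQ stack is only described when unwinding the current task in a
 * non-preemptible context; otherwise an unknown (empty) stack_info is used,
 * so the unwinder will not accept frame records on that stack.
 */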

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * The EFI runtime stack is only accessible when unwinding the current task
 * while it is executing EFI runtime services.
 */
#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})

static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}

struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;

	return data->consume_entry(data->cookie, state->common.pc);
}

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
				      void *cookie, struct task_struct *task,
				      struct pt_regs *regs)
{
	struct kunwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}

struct bpf_unwind_consume_entry_data {
	bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
	void *cookie;
};

static bool
arch_bpf_unwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct bpf_unwind_consume_entry_data *data = cookie;

	return data->consume_entry(data->cookie, state->common.pc, 0,
				   state->common.fp);
}

noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u64 ip, u64 sp,
								u64 fp),
					  void *cookie)
{
	struct bpf_unwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
}
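
/*
 * Illustrative sketch only (hypothetical callback, not defined in this file):
 * a consumer passed to arch_bpf_stack_walk() receives one (ip, sp, fp) triple
 * per frame (sp is reported as 0 above) and returns false to stop the walk:
 *
 *	static bool example_collect_frame(void *cookie, u64 ip, u64 sp, u64 fp)
 *	{
 *		return true;	- record ip/fp via cookie, keep unwinding
 *	}
 *
 *	arch_bpf_stack_walk(example_collect_frame, &example_buf);
 */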

static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;

	printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}

/*
 * The struct describing the userspace stack frame layout in AARCH64 mode.
 */
struct frame_tail {
	struct frame_tail	__user *fp;
	unsigned long		lr;
} __attribute__((packed));
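
/*
 * Background note: per the AAPCS64 frame-record convention, x29 (the frame
 * pointer) points at a two-word record {previous fp, lr} saved on the stack,
 * so following fp repeatedly walks the caller chain. unwind_user_frame()
 * below reads one such record per step.
 */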

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
unwind_user_frame(struct frame_tail __user *tail, void *cookie,
		  stack_trace_consume_fn consume_entry)
{
	struct frame_tail buftail;
	unsigned long err;
	unsigned long lr;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	lr = ptrauth_strip_user_insn_pac(buftail.lr);

	if (!consume_entry(cookie, lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp;
	u32		lr;
} __attribute__((packed));
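
/*
 * Worked example of the pointer arithmetic above: compat_frame_tail is 12
 * packed bytes (fp, sp, lr), and the saved fp points just past the record, so
 * the record itself starts at fp - sizeof(struct compat_frame_tail), i.e.
 * (struct compat_frame_tail __user *)compat_ptr(fp) - 1, which is what
 * unwind_compat_user_frame() and arch_stack_walk_user() compute below.
 */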

static struct compat_frame_tail __user *
unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie,
			 stack_trace_consume_fn consume_entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	if (!consume_entry(cookie, buftail.lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	if (!consume_entry(cookie, regs->pc))
		return;

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];
		while (tail && !((unsigned long)tail & 0x7))
			tail = unwind_user_frame(tail, cookie, consume_entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
		while (tail && !((unsigned long)tail & 0x3))
			tail = unwind_compat_user_frame(tail, cookie, consume_entry);
#endif
	}
}