// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace utility functions etc.
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 * Copyright 2018 SUSE Linux GmbH
 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>

#include <asm/paca.h>

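/*
 * Generic CONFIG_ARCH_STACKWALK entry point: follow the ABI back
 * chain, starting from @regs, the current frame, or @task's saved
 * SP, and feed each saved LR to @consume_entry until it returns
 * false or the stack fails validation.
 *
 * A minimal sketch of how the generic code in kernel/stacktrace.c
 * drives this walker (illustrative, not part of this file):
 *
 *	unsigned long entries[16];
 *	unsigned int nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 */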
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	unsigned long sp;

	if (regs && !consume_entry(cookie, regs->nip))
		return;

	if (regs)
		sp = regs->gpr[1];
	else if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		if (!validate_sp(sp, task, STACK_FRAME_OVERHEAD))
			return;

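		/*
		 * Per the powerpc ABI, a frame's first word is the
		 * back chain (the caller's SP) and the saved LR lives
		 * at index STACK_FRAME_LR_SAVE within the frame.
		 */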
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];

		if (!consume_entry(cookie, ip))
			return;

		sp = newsp;
	}
}

/*
 * This function returns an error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
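/*
 * A minimal usage sketch (illustrative): the generic code drives this
 * via stack_trace_save_tsk_reliable(), e.g. for live patching:
 *
 *	unsigned long entries[64];
 *	int nr = stack_trace_save_tsk_reliable(task, entries,
 *					       ARRAY_SIZE(entries));
 *
 * where nr is the number of entries saved, or -EINVAL if this
 * function found the stack unreliable.
 */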
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	unsigned long sp;
	unsigned long newsp;
	unsigned long stack_page = (unsigned long)task_stack_page(task);
	unsigned long stack_end;
	int graph_idx = 0;
	bool firstframe;

	stack_end = stack_page + THREAD_SIZE;
	if (!is_idle_task(task)) {
		/*
		 * For user tasks, this is the SP value loaded on
		 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
		 * system_call_common()/EXCEPTION_PROLOG_COMMON().
		 *
		 * Likewise for non-swapper kernel threads,
		 * this also happens to be the top of the stack
		 * as set up by copy_thread().
		 *
		 * Note that stack backlinks are not properly set up by
		 * copy_thread(), so a forked task will have an
		 * unreliable stack trace until it's been
		 * _switch()'ed to for the first time.
		 */
		stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	} else {
		/*
		 * idle tasks have a custom stack layout,
		 * c.f. cpu_idle_thread_init().
		 */
		stack_end -= STACK_FRAME_OVERHEAD;
	}

	if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return -EINVAL;
	}

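	/*
	 * Walk frame by frame; a reliable trace must terminate exactly
	 * at the stack_end computed above, otherwise one of the checks
	 * below rejects it.
	 */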
	for (firstframe = true; sp != stack_end;
	     firstframe = false, sp = newsp) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long ip;

		/* Sanity check: the ABI requires SP to be 16-byte aligned. */
		if (sp & 0xF)
			return -EINVAL;

		newsp = stack[0];
		/* Stack grows downwards; unwinder may only go up. */
		if (newsp <= sp)
			return -EINVAL;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return -EINVAL; /* invalid backlink, too far up. */
		}

		/*
		 * We can only trust the bottom frame's backlink; the
		 * rest of the frame may be uninitialized, so continue
		 * to the next frame.
		 */
		if (firstframe)
			continue;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			return -EINVAL;
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!__kernel_text_address(ip))
			return -EINVAL;

		/*
		 * FIXME: IMHO these tests do not belong in
		 * arch-dependent code, they are generic.
		 */
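		/*
		 * If the function-graph tracer redirected this return
		 * address to return_to_handler, recover the original
		 * caller's address.
		 */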
		ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);

#ifdef CONFIG_KPROBES
		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
		if (ip == (unsigned long)kretprobe_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, ip))
			return -EINVAL;
	}
	return 0;
}

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);
}

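/*
 * Two passes: first send an NMI IPI to every CPU in @mask, allowing
 * up to five seconds per CPU for delivery; then report on any CPU
 * that never handled it.
 */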
static void raise_backtrace_ipi(cpumask_t *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id())
			handle_backtrace_ipi(NULL);
		else
			smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
	}

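	/*
	 * CPUs that handled the IPI cleared themselves from @mask in
	 * nmi_cpu_backtrace(); any CPU still set never responded, so
	 * fall back to dumping what its paca records.
	 */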
	for_each_cpu(cpu, mask) {
		struct paca_struct *p = paca_ptrs[cpu];

		cpumask_clear_cpu(cpu, mask);

		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
		if (!virt_addr_valid(p)) {
			pr_warn("paca pointer appears corrupt? (%px)\n", p);
			continue;
		}

		pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
			p->irq_soft_mask, p->in_mce, p->in_nmi);

		if (virt_addr_valid(p->__current))
			pr_cont(" current: %d (%s)\n", p->__current->pid,
				p->__current->comm);
		else
			pr_cont(" current pointer corrupt? (%px)\n", p->__current);

		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
		show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
	}
}

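/*
 * Entry point used by the generic NMI backtrace code, e.g. from
 * trigger_all_cpu_backtrace() (sysrq-l).
 */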
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}
#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */