// SPDX-License-Identifier: GPL-2.0

/*
 * Stack trace utility functions etc.
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 * Copyright 2018 SUSE Linux GmbH
 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>

#include <asm/paca.h>

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
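/*
 * The walk below follows the ABI backchain: the long at the stack
 * pointer holds the caller's SP, and the saved LR (the return address)
 * is read from the STACK_FRAME_LR_SAVE slot of the frame.  Note the
 * slot offset differs between the 32-bit and 64-bit ABIs; the constant
 * hides that.
 */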
static void save_context_stack(struct stack_trace *trace, unsigned long sp,
			struct task_struct *tsk, int savesched)
{
	for (;;) {
		unsigned long *stack = (unsigned long *)sp;
		unsigned long newsp, ip;

		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];

		if (savesched || !in_sched_functions(ip)) {
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = ip;
			else
				trace->skip--;
		}

		if (trace->nr_entries >= trace->max_entries)
			return;

		sp = newsp;
	}
}

void save_stack_trace(struct stack_trace *trace)
{
	unsigned long sp;

	sp = current_stack_pointer();

	save_context_stack(trace, sp, current, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
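
/*
 * Note: try_get_task_stack() pins the task's stack so it cannot be
 * freed out from under us if the task exits mid-walk; the matching
 * put_task_stack() drops that reference.
 */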
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp;

	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current)
		sp = current_stack_pointer();
	else
		sp = tsk->thread.ksp;

	save_context_stack(trace, sp, tsk, 0);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
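
/*
 * gpr[1] is the PowerPC stack pointer (r1), i.e. the SP at the point
 * the register state in @regs was captured.
 */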
void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	save_context_stack(trace, regs->gpr[1], current, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
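/*
 * (Reliable traces back livepatch's consistency model: a task may only
 * be transitioned to the patched state if every frame on its stack can
 * be validated.)
 */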
static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
					   struct stack_trace *trace)
{
	unsigned long sp;
	unsigned long newsp;
	unsigned long stack_page = (unsigned long)task_stack_page(tsk);
	unsigned long stack_end;
	int graph_idx = 0;
	bool firstframe;

	stack_end = stack_page + THREAD_SIZE;
	if (!is_idle_task(tsk)) {
		/*
		 * For user tasks, this is the SP value loaded on
		 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
		 * system_call_common()/EXCEPTION_PROLOG_COMMON().
		 *
		 * Likewise for non-swapper kernel threads,
		 * this also happens to be the top of the stack
		 * as set up by copy_thread().
		 *
		 * Note that stack backlinks are not properly set up by
		 * copy_thread(), so a forked task will have an
		 * unreliable stack trace until it's been
		 * _switch()'ed to for the first time.
		 */
		stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	} else {
		/*
		 * idle tasks have a custom stack layout,
		 * c.f. cpu_idle_thread_init().
		 */
		stack_end -= STACK_FRAME_OVERHEAD;
	}

	if (tsk == current)
		sp = current_stack_pointer();
	else
		sp = tsk->thread.ksp;

	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return -EINVAL;
	}
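
	/*
	 * Walk frame by frame towards stack_end, validating each frame
	 * before following its backchain; any anomaly aborts the walk
	 * with -EINVAL so the caller never acts on a partially-trusted
	 * trace.
	 */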
	for (firstframe = true; sp != stack_end;
	     firstframe = false, sp = newsp) {
		unsigned long *stack = (unsigned long *)sp;
		unsigned long ip;

		/* sanity check: ABI requires SP to be aligned 16 bytes. */
		if (sp & 0xF)
			return -EINVAL;

		newsp = stack[0];
		/* Stack grows downwards; unwinder may only go up. */
		if (newsp <= sp)
			return -EINVAL;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return -EINVAL; /* invalid backlink, too far up. */
		}

		/*
		 * We can only trust the bottom frame's backlink; the
		 * rest of the frame may be uninitialized.  Continue to
		 * the next frame.
		 */
		if (firstframe)
			continue;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			return -EINVAL;
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!__kernel_text_address(ip))
			return -EINVAL;
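
		/*
		 * With the function graph tracer active, saved return
		 * addresses may have been rewritten to point into the
		 * tracer's return trampoline; ftrace_graph_ret_addr()
		 * maps such an address back to the original caller.
		 */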
		/*
		 * FIXME: IMHO these tests do not belong in
		 * arch-dependent code, they are generic.
		 */
		ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, NULL);
#ifdef CONFIG_KPROBES
		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
		if (ip == (unsigned long)kretprobe_trampoline)
			return -EINVAL;
#endif

		if (trace->nr_entries >= trace->max_entries)
			return -E2BIG;
		if (!trace->skip)
			trace->entries[trace->nr_entries++] = ip;
		else
			trace->skip--;
	}
	return 0;
}

int save_stack_trace_tsk_reliable(struct task_struct *tsk,
				  struct stack_trace *trace)
{
	int ret;

	/*
	 * If the task doesn't have a stack (e.g., a zombie), the stack is
	 * "reliably" empty.
	 */
	if (!try_get_task_stack(tsk))
		return 0;

	ret = __save_stack_trace_tsk_reliable(tsk, trace);

	put_task_stack(tsk);

	return ret;
}
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
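/*
 * Remote backtraces via NMI IPI.  (Assumption worth stating: on these
 * platforms the NMI IPI is delivered as a system reset exception, so a
 * target CPU can print its trace even while stuck with normal
 * interrupts disabled.)
 */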
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);
}

static void raise_backtrace_ipi(cpumask_t *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id())
			handle_backtrace_ipi(NULL);
		else
			smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
	}
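
	/*
	 * CPUs that handled their IPI should already have been cleared
	 * from the mask by nmi_cpu_backtrace(); anything still set here
	 * is treated as unresponsive and we fall back to dumping what
	 * its paca says, which may be stale.
	 */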
	for_each_cpu(cpu, mask) {
		struct paca_struct *p = paca_ptrs[cpu];

		cpumask_clear_cpu(cpu, mask);

		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
		if (!virt_addr_valid(p)) {
			pr_warn("paca pointer appears corrupt? (%px)\n", p);
			continue;
		}

		pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
			p->irq_soft_mask, p->in_mce, p->in_nmi);

		if (virt_addr_valid(p->__current))
			pr_cont(" current: %d (%s)\n", p->__current->pid,
				p->__current->comm);
		else
			pr_cont(" current pointer corrupt? (%px)\n", p->__current);

		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
		show_stack(p->__current, (unsigned long *)p->saved_r1);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}
#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */