2010-02-05 21:47:04 -05:00
/*
 * HW NMI watchdog support
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Arch specific calls to support NMI watchdog
 *
 * Bits copied from original nmi.c file
 *
 */
2010-05-13 09:12:39 +02:00
# include <asm/apic.h>
x86: Fix trigger_all_cpu_backtrace() implementation
The following change fixes the x86 implementation of
trigger_all_cpu_backtrace(), which was previously (accidentally,
as far as I can tell) disabled to always return false as on
architectures that do not implement this function.
trigger_all_cpu_backtrace(), as defined in include/linux/nmi.h,
should call arch_trigger_all_cpu_backtrace() if available, or
return false if the underlying arch doesn't implement this
function.
x86 did provide a suitable arch_trigger_all_cpu_backtrace()
implementation, but it wasn't actually being used because it was
declared in asm/nmi.h, which linux/nmi.h doesn't include. Also,
linux/nmi.h couldn't easily be fixed by including asm/nmi.h,
because that file is not available on all architectures.
I am proposing to fix this by moving the x86 definition of
arch_trigger_all_cpu_backtrace() to asm/irq.h.
Tested via: echo l > /proc/sysrq-trigger
Before the change, this uses a fallback implementation which
shows backtraces on active CPUs (using
smp_call_function_interrupt() )
After the change, this shows NMI backtraces on all CPUs
Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1370518875-1346-1-git-send-email-walken@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2013-06-06 04:41:15 -07:00
# include <asm/nmi.h>
2010-02-05 21:47:04 -05:00
# include <linux/cpumask.h>
2010-05-07 17:11:48 -04:00
# include <linux/kdebug.h>
# include <linux/notifier.h>
# include <linux/kprobes.h>
2010-02-05 21:47:04 -05:00
# include <linux/nmi.h>
# include <linux/module.h>
2011-03-25 15:20:14 +01:00
# include <linux/delay.h>
2014-06-19 17:33:32 -04:00
# include <linux/seq_buf.h>
2010-02-05 21:47:04 -05:00
2010-11-12 11:22:23 -05:00
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * Return the hardlockup-detector sample period in clock cycles:
 * cpu_khz is cycles per millisecond, so cpu_khz * 1000 is cycles per
 * second, scaled by the watchdog threshold (in seconds).
 */
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return (u64)cpu_khz * 1000 * watchdog_thresh;
}
#endif
2010-02-12 17:19:19 -05:00
2010-12-09 14:47:34 +06:00
#ifdef arch_trigger_all_cpu_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
/* Snapshot of the targeted CPUs, used when printing the collected traces. */
static cpumask_t printtrace_mask;

#define NMI_BUF_SIZE		4096

/* Per-CPU buffer the NMI handler's redirected printk() writes into. */
struct nmi_seq_buf {
	unsigned char		buffer[NMI_BUF_SIZE];
	struct seq_buf		seq;
};

/* Safe printing in NMI context */
static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;
2014-06-19 17:33:32 -04:00
static void print_seq_line ( struct nmi_seq_buf * s , int start , int end )
{
const char * buf = s - > buffer + start ;
printk ( " %.*s " , ( end - start ) + 1 , buf ) ;
}
2014-06-23 13:22:05 -07:00
/*
 * Send an NMI to every online CPU (optionally excluding the caller) so each
 * one dumps its backtrace into a per-CPU seq_buf, then print the collected
 * buffers from this (non-NMI) context.
 */
void arch_trigger_all_cpu_backtrace(bool include_self)
{
	struct nmi_seq_buf *sb;
	int this_cpu = get_cpu();
	int cpu, i, len;

	if (test_and_set_bit(0, &backtrace_flag)) {
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		put_cpu();
		return;
	}

	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
	if (!include_self)
		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));

	/* Keep a copy: NMI handlers clear bits from backtrace_mask as they run. */
	cpumask_copy(&printtrace_mask, to_cpumask(backtrace_mask));

	/*
	 * Set up per_cpu seq_buf buffers that the NMIs running on the other
	 * CPUs will write to.
	 */
	for_each_cpu(cpu, to_cpumask(backtrace_mask)) {
		sb = &per_cpu(nmi_print_seq, cpu);
		seq_buf_init(&sb->seq, sb->buffer, NMI_BUF_SIZE);
	}

	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
		pr_info("sending NMI to %s CPUs:\n",
			(include_self ? "all" : "other"));
		apic->send_IPI_mask(to_cpumask(backtrace_mask), NMI_VECTOR);
	}

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
		touch_softlockup_watchdog();
	}

	/*
	 * Now that all the NMIs have triggered, we can dump out their
	 * back traces safely to the console.
	 */
	for_each_cpu(cpu, &printtrace_mask) {
		int last_i = 0;

		sb = &per_cpu(nmi_print_seq, cpu);
		len = seq_buf_used(&sb->seq);
		if (!len)
			continue;

		/* Print line by line. */
		for (i = 0; i < len; i++) {
			if (sb->buffer[i] == '\n') {
				print_seq_line(sb, last_i, i);
				last_i = i + 1;
			}
		}
		/* Check if there was a partial line. */
		if (last_i < len) {
			print_seq_line(sb, last_i, len - 1);
			pr_cont("\n");
		}
	}

	clear_bit(0, &backtrace_flag);
	smp_mb__after_atomic();
	put_cpu();
}
2010-05-07 17:11:48 -04:00
2014-06-19 17:33:32 -04:00
/*
 * It is not safe to call printk() directly from NMI handlers.
 * It may be fine if the NMI detected a lock up and we have no choice
 * but to do so, but doing a NMI on all other CPUs to get a back trace
 * can be done with a sysrq-l. We don't want that to lock up, which
 * can happen if the NMI interrupts a printk in progress.
 *
 * Instead, we redirect the vprintk() to this nmi_vprintk() that writes
 * the content into a per cpu seq_buf buffer. Then when the NMIs are
 * all done, we can safely dump the contents of the seq_buf to a printk()
 * from a non NMI context.
 *
 * Returns the number of bytes added to this CPU's buffer by this call.
 */
static int nmi_vprintk(const char *fmt, va_list args)
{
	struct nmi_seq_buf *sb = this_cpu_ptr(&nmi_print_seq);
	unsigned int before = seq_buf_used(&sb->seq);

	seq_buf_vprintf(&sb->seq, fmt, args);

	return seq_buf_used(&sb->seq) - before;
}
2014-04-17 17:18:14 +09:00
static int
2011-09-30 15:06:21 -04:00
arch_trigger_all_cpu_backtrace_handler ( unsigned int cmd , struct pt_regs * regs )
2010-05-07 17:11:48 -04:00
{
2011-01-04 22:38:07 -05:00
int cpu ;
2010-05-07 17:11:48 -04:00
2011-01-04 22:38:07 -05:00
cpu = smp_processor_id ( ) ;
2010-05-07 17:11:48 -04:00
if ( cpumask_test_cpu ( cpu , to_cpumask ( backtrace_mask ) ) ) {
2014-06-19 17:33:32 -04:00
printk_func_t printk_func_save = this_cpu_read ( printk_func ) ;
2010-05-07 17:11:48 -04:00
2014-06-19 17:33:32 -04:00
/* Replace printk to write into the NMI seq */
this_cpu_write ( printk_func , nmi_vprintk ) ;
2010-05-07 17:11:48 -04:00
printk ( KERN_WARNING " NMI backtrace for cpu %d \n " , cpu ) ;
show_regs ( regs ) ;
2014-06-19 17:33:32 -04:00
this_cpu_write ( printk_func , printk_func_save ) ;
2010-05-07 17:11:48 -04:00
cpumask_clear_cpu ( cpu , to_cpumask ( backtrace_mask ) ) ;
2011-09-30 15:06:21 -04:00
return NMI_HANDLED ;
2010-05-07 17:11:48 -04:00
}
2011-09-30 15:06:21 -04:00
return NMI_DONE ;
2010-05-07 17:11:48 -04:00
}
2014-04-17 17:18:14 +09:00
NOKPROBE_SYMBOL ( arch_trigger_all_cpu_backtrace_handler ) ;
2010-05-07 17:11:48 -04:00
/* Hook the backtrace handler into the local-NMI chain at early boot. */
static int __init register_trigger_all_cpu_backtrace(void)
{
	register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
				0, "arch_bt");
	return 0;
}
early_initcall(register_trigger_all_cpu_backtrace);
2010-02-18 21:56:52 -05:00
# endif