Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 asm updates from Ingo Molnar:
 "The main changes in this cycle were:

   - unwinder fixes and enhancements

   - improve ftrace interaction with the unwinder

   - optimize the code footprint of WARN() and related debugging constructs

   - ... plus misc updates, cleanups and fixes"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  x86/unwind: Dump all stacks in unwind_dump()
  x86/unwind: Silence more entry-code related warnings
  x86/ftrace: Fix ebp in ftrace_regs_caller that screws up unwinder
  x86/unwind: Remove unused 'sp' parameter in unwind_dump()
  x86/unwind: Prepend hex mask value with '0x' in unwind_dump()
  x86/unwind: Properly zero-pad 32-bit values in unwind_dump()
  x86/unwind: Ensure stack pointer is aligned
  debug: Avoid setting BUGFLAG_WARNING twice
  x86/unwind: Silence entry-related warnings
  x86/unwind: Read stack return address in update_stack_state()
  x86/unwind: Move common code into update_stack_state()
  debug: Fix __bug_table[] in arch linker scripts
  debug: Add _ONCE() logic to report_bug()
  x86/debug: Define BUG() again for !CONFIG_BUG
  x86/debug: Implement __WARN() using UD0
  x86/ftrace: Use Makefile logic instead of #ifdef for compiling ftrace_*.o
  x86/ftrace: Add -mfentry support to x86_32 with DYNAMIC_FTRACE set
  x86/ftrace: Clean up ftrace_regs_caller
  x86/ftrace: Add stack frame pointer to ftrace_caller
  x86/ftrace: Move the ftrace specific code out of entry_32.S
  ...
@@ -242,6 +242,8 @@ SECTIONS
 	_edata_loc = __data_loc + SIZEOF(.data);
 
+	BUG_TABLE
+
 #ifdef CONFIG_HAVE_TCM
 	/*
 	 * We align everything to a page boundary so we can
@@ -262,6 +262,8 @@ SECTIONS
 	_edata_loc = __data_loc + SIZEOF(.data);
 
+	BUG_TABLE
+
 #ifdef CONFIG_HAVE_TCM
 	/*
 	 * We align everything to a page boundary so we can
@@ -55,7 +55,7 @@ _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
 	unreachable(); \
 } while (0)
 
-#define __WARN_TAINT(taint) _BUG_FLAGS(BUGFLAG_TAINT(taint))
+#define __WARN_FLAGS(flags) _BUG_FLAGS(BUGFLAG_WARNING|(flags))
 
 #endif /* ! CONFIG_GENERIC_BUG */
 
@@ -115,6 +115,8 @@ SECTIONS
 	__data_lma = LOADADDR(.data);
 	__data_len = SIZEOF(.data);
 
+	BUG_TABLE
+
 	/* The init section should be last, so when we free it, it goes into
 	 * the general memory pool, and (hopefully) will decrease fragmentation
 	 * a tiny bit. The init section has a _requirement_ that it be
@@ -128,6 +128,8 @@ SECTIONS
 	. = ALIGN(8);
 	}
 
+	BUG_TABLE
+
 	_edata = .;
 
 	__bss_start = .;
@@ -68,6 +68,8 @@ SECTIONS
 	__edata = . ; /* End of data section. */
 	_edata = . ;
 
+	BUG_TABLE
+
 	INIT_TASK_DATA_SECTION(PAGE_SIZE)
 
 	. = ALIGN(PAGE_SIZE);	/* Init code and data. */
@@ -102,6 +102,8 @@ SECTIONS
 
 	_edata = .;	/* End of data section */
 
+	BUG_TABLE
+
 	/* GP section */
 	. = ALIGN(L1_CACHE_BYTES);
 	_gp = . + 2048;
@@ -192,6 +192,8 @@ SECTIONS {
 	CONSTRUCTORS
 	}
 
+	BUG_TABLE
+
 	. = ALIGN(16);	/* gp must be 16-byte aligned for exc. table */
 	.got : AT(ADDR(.got) - LOAD_OFFSET) {
 	*(.got.plt)
@@ -97,6 +97,7 @@ SECTIONS
 	DATA_DATA
 	CONSTRUCTORS
 	}
+	BUG_TABLE
 	_gp = . + 0x8000;
 	.lit8 : {
 	*(.lit8)
@@ -46,7 +46,7 @@
 #endif
 
 #ifdef CONFIG_DEBUG_BUGVERBOSE
-#define __WARN_TAINT(taint) \
+#define __WARN_FLAGS(flags) \
 	do { \
 		asm volatile("\n" \
 			     "1:\t" PARISC_BUG_BREAK_ASM "\n" \
@@ -56,11 +56,11 @@
 			     "\t.org 2b+%c3\n" \
 			     "\t.popsection" \
 			     : : "i" (__FILE__), "i" (__LINE__), \
-			     "i" (BUGFLAG_TAINT(taint)), \
+			     "i" (BUGFLAG_WARNING|(flags)), \
 			     "i" (sizeof(struct bug_entry)) ); \
 	} while(0)
 #else
-#define __WARN_TAINT(taint) \
+#define __WARN_FLAGS(flags) \
 	do { \
 		asm volatile("\n" \
 			     "1:\t" PARISC_BUG_BREAK_ASM "\n" \
@@ -69,7 +69,7 @@
 			     "\t.short %c0\n" \
 			     "\t.org 2b+%c1\n" \
 			     "\t.popsection" \
-			     : : "i" (BUGFLAG_TAINT(taint)), \
+			     : : "i" (BUGFLAG_WARNING|(flags)), \
 			     "i" (sizeof(struct bug_entry)) ); \
 	} while(0)
 #endif
@@ -85,12 +85,12 @@
 	} \
 } while (0)
 
-#define __WARN_TAINT(taint) do { \
+#define __WARN_FLAGS(flags) do { \
 	__asm__ __volatile__( \
 		"1: twi 31,0,0\n" \
 		_EMIT_BUG_ENTRY \
 		: : "i" (__FILE__), "i" (__LINE__), \
-		    "i" (BUGFLAG_TAINT(taint)), \
+		    "i" (BUGFLAG_WARNING|(flags)), \
 		    "i" (sizeof(struct bug_entry))); \
 } while (0)
 
@@ -312,6 +312,8 @@ SECTIONS
 	NOSAVE_DATA
 	}
 
+	BUG_TABLE
+
 	. = ALIGN(PAGE_SIZE);
 	_edata = .;
 	PROVIDE32 (edata = .);
@@ -46,8 +46,8 @@
 	unreachable(); \
 } while (0)
 
-#define __WARN_TAINT(taint) do { \
-	__EMIT_BUG(BUGFLAG_TAINT(taint)); \
+#define __WARN_FLAGS(flags) do { \
+	__EMIT_BUG(BUGFLAG_WARNING|(flags)); \
 } while (0)
 
 #define WARN_ON(x) ({ \
@@ -50,7 +50,7 @@ do { \
 	     "i" (sizeof(struct bug_entry))); \
 } while (0)
 
-#define __WARN_TAINT(taint) \
+#define __WARN_FLAGS(flags) \
 do { \
 	__asm__ __volatile__ ( \
 		"1:\t.short %O0\n" \
@@ -59,7 +59,7 @@ do { \
 	     : "n" (TRAPA_BUG_OPCODE), \
 	       "i" (__FILE__), \
 	       "i" (__LINE__), \
-	       "i" (BUGFLAG_TAINT(taint)), \
+	       "i" (BUGFLAG_WARNING|(flags)), \
 	       "i" (sizeof(struct bug_entry))); \
 } while (0)
 
@@ -50,11 +50,6 @@ config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
 
-config GENERIC_BUG
-	bool
-	default y
-	depends on BUG
-
 config HZ
 	int
 	default 100
@@ -126,7 +126,7 @@ config X86
 	select HAVE_EBPF_JIT if X86_64
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select HAVE_EXIT_THREAD
-	select HAVE_FENTRY if X86_64
+	select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
@@ -35,16 +35,13 @@
 #include <asm/errno.h>
 #include <asm/segment.h>
 #include <asm/smp.h>
-#include <asm/page_types.h>
 #include <asm/percpu.h>
 #include <asm/processor-flags.h>
-#include <asm/ftrace.h>
 #include <asm/irq_vectors.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
-#include <asm/export.h>
 #include <asm/frame.h>
 
 .section .entry.text, "ax"
@@ -585,7 +582,7 @@ ENTRY(iret_exc )
 	 * will soon execute iret and the tracer was already set to
 	 * the irqstate after the IRET:
 	 */
-	DISABLE_INTERRUPTS(CLBR_EAX)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	lss	(%esp), %esp		/* switch to espfix segment */
 	jmp	.Lrestore_nocheck
 #endif
@@ -886,172 +883,6 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
 
 #endif /* CONFIG_HYPERV */
 
-#ifdef CONFIG_FUNCTION_TRACER
-[... 166 lines removed: the mcount, ftrace_caller, ftrace_regs_caller,
-     ftrace_graph_caller and return_to_handler implementations leave
-     entry_32.S; their replacements appear in the new ftrace_32.S below ...]
-#endif /* CONFIG_FUNCTION_TRACER */
-
 #ifdef CONFIG_TRACING
 ENTRY(trace_page_fault)
 	ASM_CLAC
@@ -212,7 +212,7 @@ entry_SYSCALL_64_fastpath:
 	 * to check with IRQs off), then we can go straight to SYSRET64.
 	 */
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	movq	PER_CPU_VAR(current_task), %r11
 	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
@@ -233,7 +233,7 @@ entry_SYSCALL_64_fastpath:
 	 */
 	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_NONE)
+	ENABLE_INTERRUPTS(CLBR_ANY)
 	SAVE_EXTRA_REGS
 	movq	%rsp, %rdi
 	call	syscall_return_slowpath	/* returns with IRQs disabled */
@@ -343,7 +343,7 @@ ENTRY(stub_ptregs_64)
 	 * and jump to slow path
 	 */
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	popq	%rax
 	jmp	entry_SYSCALL64_slow_path
@@ -518,7 +518,7 @@ common_interrupt:
 	/* 0(%rsp): old RSP */
 ret_from_intr:
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	decl	PER_CPU_VAR(irq_count)
@@ -1051,7 +1051,7 @@ END(paranoid_entry)
 	 */
ENTRY(paranoid_exit)
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF_DEBUG
 	testl	%ebx, %ebx		/* swapgs needed? */
 	jnz	paranoid_exit_no_swapgs
@@ -1156,10 +1156,9 @@ END(error_entry)
 	 */
ENTRY(error_exit)
-	movl	%ebx, %eax
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
-	testl	%eax, %eax
+	testl	%ebx, %ebx
 	jnz	retint_kernel
 	jmp	retint_user
END(error_exit)
@@ -1,36 +1,82 @@
 #ifndef _ASM_X86_BUG_H
 #define _ASM_X86_BUG_H
 
-#define HAVE_ARCH_BUG
+#include <linux/stringify.h>
+
+/*
+ * Since some emulators terminate on UD2, we cannot use it for WARN.
+ * Since various instruction decoders disagree on the length of UD1,
+ * we cannot use it either. So use UD0 for WARN.
+ *
+ * (binutils knows about "ud1" but {en,de}codes it as 2 bytes, whereas
+ * our kernel decoder thinks it takes a ModRM byte, which seems consistent
+ * with various things like the Intel SDM instruction encoding rules)
+ */
+
+#define ASM_UD0		".byte 0x0f, 0xff"
+#define ASM_UD1		".byte 0x0f, 0xb9" /* + ModRM */
+#define ASM_UD2		".byte 0x0f, 0x0b"
+
+#define INSN_UD0	0xff0f
+#define INSN_UD2	0x0b0f
+
+#define LEN_UD0		2
+
+#ifdef CONFIG_GENERIC_BUG
+
+#ifdef CONFIG_X86_32
+# define __BUG_REL(val)	".long " __stringify(val)
+#else
+# define __BUG_REL(val)	".long " __stringify(val) " - 2b"
+#endif
 
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 
-#ifdef CONFIG_X86_32
-# define __BUG_C0	"2:\t.long 1b, %c0\n"
-#else
-# define __BUG_C0	"2:\t.long 1b - 2b, %c0 - 2b\n"
-#endif
+#define _BUG_FLAGS(ins, flags)						\
+do {									\
+	asm volatile("1:\t" ins "\n"					\
+		     ".pushsection __bug_table,\"a\"\n"			\
+		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"	\
+		     "\t" __BUG_REL(%c0) "\t# bug_entry::file\n"	\
+		     "\t.word %c1"        "\t# bug_entry::line\n"	\
+		     "\t.word %c2"        "\t# bug_entry::flags\n"	\
+		     "\t.org 2b+%c3\n"					\
+		     ".popsection"					\
+		     : : "i" (__FILE__), "i" (__LINE__),		\
+			 "i" (flags),					\
+			 "i" (sizeof(struct bug_entry)));		\
+} while (0)
+
+#else /* !CONFIG_DEBUG_BUGVERBOSE */
+
+#define _BUG_FLAGS(ins, flags)						\
+do {									\
+	asm volatile("1:\t" ins "\n"					\
+		     ".pushsection __bug_table,\"a\"\n"			\
+		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"	\
+		     "\t.word %c0"        "\t# bug_entry::flags\n"	\
+		     "\t.org 2b+%c1\n"					\
+		     ".popsection"					\
+		     : : "i" (flags),					\
+			 "i" (sizeof(struct bug_entry)));		\
+} while (0)
+
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
+#else
+
+#define _BUG_FLAGS(ins, flags)  asm volatile(ins)
+
+#endif /* CONFIG_GENERIC_BUG */
 
+#define HAVE_ARCH_BUG
 #define BUG()							\
 do {								\
-	asm volatile("1:\tud2\n"				\
-		     ".pushsection __bug_table,\"a\"\n"		\
-		     __BUG_C0					\
-		     "\t.word %c1, 0\n"				\
-		     "\t.org 2b+%c2\n"				\
-		     ".popsection"				\
-		     : : "i" (__FILE__), "i" (__LINE__),	\
-			 "i" (sizeof(struct bug_entry)));	\
+	_BUG_FLAGS(ASM_UD2, 0);					\
 	unreachable();						\
 } while (0)
 
-#else
-#define BUG()							\
-do {								\
-	asm volatile("ud2");					\
-	unreachable();						\
-} while (0)
-#endif
+#define __WARN_FLAGS(flags)	_BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags))
 
 #include <asm-generic/bug.h>
 
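The __bug_table encoding above is what shrinks each WARN() call site: only the two UD0 bytes stay in the instruction stream, while file, line and flags move into a separate section that the trap handler looks up later. Below is a rough userspace sketch of that section-table idea, not the kernel implementation; the demo_bug_table section name, the struct layout and the DEMO_WARN() macro are invented for illustration (build with GCC or Clang on Linux, which auto-generate the __start_/__stop_ section symbols):

#include <stdio.h>

struct demo_bug_entry {
	const char	*file;
	unsigned short	line;
	unsigned short	flags;
};

/* the linker provides __start_/__stop_ symbols for this section */
extern struct demo_bug_entry __start_demo_bug_table[];
extern struct demo_bug_entry __stop_demo_bug_table[];

#define DEMO_WARN()							\
do {									\
	static struct demo_bug_entry entry				\
		__attribute__((section("demo_bug_table"), used)) =	\
		{ __FILE__, __LINE__, 0 };				\
	(void)entry;	/* per-site metadata lives in the table, not here */ \
} while (0)

static void site_a(void) { DEMO_WARN(); }
static void site_b(void) { DEMO_WARN(); }

int main(void)
{
	struct demo_bug_entry *e;

	site_a();
	site_b();
	for (e = __start_demo_bug_table; e < __stop_demo_bug_table; e++)
		printf("warn site registered at %s:%u\n", e->file, e->line);
	return 0;
}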
@@ -4,6 +4,7 @@
 #include <asm/page_64_types.h>
 
 #ifndef __ASSEMBLY__
+#include <asm/alternative.h>
 
 /* duplicated to the one in bootmem.h */
 extern unsigned long max_pfn;
@@ -34,7 +35,20 @@ extern unsigned long __phys_addr_symbol(unsigned long);
 #define pfn_valid(pfn)          ((pfn) < max_pfn)
 #endif
 
-void clear_page(void *page);
+void clear_page_orig(void *page);
+void clear_page_rep(void *page);
+void clear_page_erms(void *page);
+
+static inline void clear_page(void *page)
+{
+	alternative_call_2(clear_page_orig,
+			   clear_page_rep, X86_FEATURE_REP_GOOD,
+			   clear_page_erms, X86_FEATURE_ERMS,
+			   "=D" (page),
+			   "0" (page)
+			   : "memory", "rax", "rcx");
+}
+
 void copy_page(void *to, void *from);
 
 #endif	/* !__ASSEMBLY__ */
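clear_page() now picks one of three implementations through alternative_call_2(), which patches the call target once at boot based on CPU features. As a hedged analogue only (the kernel does not use a function pointer here, and clear_page_fancy/cpu_has_erms below are invented names), the same selection can be pictured as a one-time dispatch decision:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static void clear_page_generic(void *page) { memset(page, 0, PAGE_SIZE); }
static void clear_page_fancy(void *page)   { memset(page, 0, PAGE_SIZE); }

/* chosen once at startup, analogous in spirit to boot-time call patching */
static void (*clear_page_ptr)(void *) = clear_page_generic;

static void clear_page_init(int cpu_has_erms)
{
	if (cpu_has_erms)
		clear_page_ptr = clear_page_fancy;
}

int main(void)
{
	static char page[PAGE_SIZE] = { 1 };

	clear_page_init(1);
	clear_page_ptr(page);
	printf("page[0] after clear: %d\n", page[0]);
	return 0;
}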
@@ -12,8 +12,10 @@ struct unwind_state {
 	struct task_struct *task;
 	int graph_idx;
 #ifdef CONFIG_FRAME_POINTER
+	bool got_irq;
 	unsigned long *bp, *orig_sp;
 	struct pt_regs *regs;
+	unsigned long ip;
 #else
 	unsigned long *sp;
 #endif
@@ -27,7 +27,7 @@ KASAN_SANITIZE_stacktrace.o := n
 
 OBJECT_FILES_NON_STANDARD_head_$(BITS).o := y
 OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y
-OBJECT_FILES_NON_STANDARD_mcount_$(BITS).o := y
+OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
 OBJECT_FILES_NON_STANDARD_test_nx.o := y
 
 # If instrumentation of this dir is enabled, boot hangs during first second.
@@ -46,7 +46,7 @@ obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o
 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-y += probe_roms.o
-obj-$(CONFIG_X86_64) += sys_x86_64.o mcount_64.o
+obj-$(CONFIG_X86_64) += sys_x86_64.o
 obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
 obj-$(CONFIG_SYSFS) += ksysfs.o
 obj-y += bootflag.o e820.o
@@ -82,6 +82,7 @@ obj-y += apic/
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_LIVEPATCH) += livepatch.o
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace_$(BITS).o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
 obj-$(CONFIG_X86_TSC) += trace_clock.o
|
@ -77,7 +77,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
|
|||||||
* - softirq stack
|
* - softirq stack
|
||||||
* - hardirq stack
|
* - hardirq stack
|
||||||
*/
|
*/
|
||||||
for (regs = NULL; stack; stack = stack_info.next_sp) {
|
for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
|
||||||
const char *stack_name;
|
const char *stack_name;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -289,9 +289,6 @@ void die(const char *str, struct pt_regs *regs, long err)
|
|||||||
unsigned long flags = oops_begin();
|
unsigned long flags = oops_begin();
|
||||||
int sig = SIGSEGV;
|
int sig = SIGSEGV;
|
||||||
|
|
||||||
if (!user_mode(regs))
|
|
||||||
report_bug(regs->ip, regs);
|
|
||||||
|
|
||||||
if (__die(str, regs, err))
|
if (__die(str, regs, err))
|
||||||
sig = 0;
|
sig = 0;
|
||||||
oops_end(flags, regs, sig);
|
oops_end(flags, regs, sig);
|
||||||
|
@@ -162,15 +162,3 @@ void show_regs(struct pt_regs *regs)
 	}
 	pr_cont("\n");
 }
-
-int is_valid_bugaddr(unsigned long ip)
-{
-	unsigned short ud2;
-
-	if (ip < PAGE_OFFSET)
-		return 0;
-	if (probe_kernel_address((unsigned short *)ip, ud2))
-		return 0;
-
-	return ud2 == 0x0b0f;
-}
@@ -178,13 +178,3 @@ void show_regs(struct pt_regs *regs)
 	}
 	pr_cont("\n");
 }
-
-int is_valid_bugaddr(unsigned long ip)
-{
-	unsigned short ud2;
-
-	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
-		return 0;
-
-	return ud2 == 0x0b0f;
-}
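The two per-bitness copies of is_valid_bugaddr() removed above are replaced by a single check in traps.c (see the hunk further down) that compares the 16-bit opcode at the trapping address against INSN_UD0/INSN_UD2 from the new bug.h. A small userspace sketch of that comparison, using memcpy on little-endian byte buffers instead of the kernel's probe_kernel_address() (buffer names are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define INSN_UD0 0xff0f		/* 0x0f 0xff, loaded little-endian */
#define INSN_UD2 0x0b0f		/* 0x0f 0x0b */

static int is_ud_opcode(const unsigned char *addr)
{
	uint16_t insn;

	memcpy(&insn, addr, sizeof(insn));	/* unaligned-safe read */
	return insn == INSN_UD0 || insn == INSN_UD2;
}

int main(void)
{
	const unsigned char warn_site[] = { 0x0f, 0xff };	/* UD0 */
	const unsigned char bug_site[]  = { 0x0f, 0x0b };	/* UD2 */
	const unsigned char other[]     = { 0x90, 0x90 };	/* NOPs */

	printf("%d %d %d\n", is_ud_opcode(warn_site),
	       is_ud_opcode(bug_site), is_ud_opcode(other));	/* prints: 1 1 0 */
	return 0;
}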
arch/x86/kernel/ftrace_32.S (new file, 244 lines)
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2017 Steven Rostedt, VMware Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+#include <asm/segment.h>
+#include <asm/export.h>
+#include <asm/ftrace.h>
+
+#ifdef CC_USING_FENTRY
+# define function_hook	__fentry__
+EXPORT_SYMBOL(__fentry__)
+#else
+# define function_hook	mcount
+EXPORT_SYMBOL(mcount)
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+/* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */
+#if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER)
+# define USING_FRAME_POINTER
+#endif
+
+ENTRY(function_hook)
+	ret
+END(function_hook)
+
+ENTRY(ftrace_caller)
+[... sets up a frame pointer when USING_FRAME_POINTER (adding the extra
+     parent-ip/ebp frame that CC_USING_FENTRY needs), saves %eax/%ecx/%edx,
+     loads ip, parent ip and function_trace_op, and calls through the
+     ftrace_call site; WEAK(ftrace_stub) and ftrace_graph_call follow ...]
+END(ftrace_caller)
+
+ENTRY(ftrace_regs_caller)
+[... builds a full pt_regs frame (flags staged in the return-ip slot, a real
+     __KERNEL_CS pushed for regs->cs), calls through ftrace_regs_call, then
+     restores registers and flags and returns via .Lftrace_ret ...]
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+ENTRY(function_hook)
+[... non-dynamic path: compares ftrace_trace_function and the graph hooks
+     against their stubs and tail-calls the registered tracer ...]
+END(function_hook)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+[... calls prepare_ftrace_return; with CC_USING_FENTRY the parent-ip slot is
+     taken from the stack and the frame-pointer argument is 0 ...]
+END(ftrace_graph_caller)
+
+return_to_handler:
+[... calls ftrace_return_to_handler and jumps to the returned address ...]
+#endif
@@ -1,6 +1,4 @@
 /*
- * linux/arch/x86_64/mcount_64.S
- *
  * Copyright (C) 2014 Steven Rostedt, Red Hat Inc
  */
 
@@ -13,9 +11,6 @@
 .code64
 .section .entry.text, "ax"
 
-#ifdef CONFIG_FUNCTION_TRACER
-
 #ifdef CC_USING_FENTRY
 # define function_hook	__fentry__
 EXPORT_SYMBOL(__fentry__)
@@ -297,7 +292,6 @@ trace:
 	jmp fgraph_trace
 END(function_hook)
 #endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
@@ -169,6 +169,37 @@ void ist_end_non_atomic(void)
 	preempt_disable();
 }
 
+int is_valid_bugaddr(unsigned long addr)
+{
+	unsigned short ud;
+
+	if (addr < TASK_SIZE_MAX)
+		return 0;
+
+	if (probe_kernel_address((unsigned short *)addr, ud))
+		return 0;
+
+	return ud == INSN_UD0 || ud == INSN_UD2;
+}
+
+static int fixup_bug(struct pt_regs *regs, int trapnr)
+{
+	if (trapnr != X86_TRAP_UD)
+		return 0;
+
+	switch (report_bug(regs->ip, regs)) {
+	case BUG_TRAP_TYPE_NONE:
+	case BUG_TRAP_TYPE_BUG:
+		break;
+
+	case BUG_TRAP_TYPE_WARN:
+		regs->ip += LEN_UD0;
+		return 1;
+	}
+
+	return 0;
+}
+
 static nokprobe_inline int
 do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
 		  struct pt_regs *regs, long error_code)
@@ -187,12 +218,15 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
 	}
 
 	if (!user_mode(regs)) {
-		if (!fixup_exception(regs, trapnr)) {
-			tsk->thread.error_code = error_code;
-			tsk->thread.trap_nr = trapnr;
-			die(str, regs, error_code);
-		}
-		return 0;
+		if (fixup_exception(regs, trapnr))
+			return 0;
+
+		if (fixup_bug(regs, trapnr))
+			return 0;
+
+		tsk->thread.error_code = error_code;
+		tsk->thread.trap_nr = trapnr;
+		die(str, regs, error_code);
 	}
 
 	return -1;
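fixup_bug() above is what lets a WARN survive: for BUG_TRAP_TYPE_WARN it advances regs->ip past the 2-byte UD0 and execution resumes. A userspace sketch of the same resume trick using a SIGILL handler (x86-64 Linux with glibc register names only; printf in a signal handler is not signal-safe and this is an illustration, not kernel code):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <ucontext.h>

#define LEN_UD0 2	/* matches the ".byte 0x0f, 0xff" emitted below */

static void sigill_handler(int sig, siginfo_t *info, void *uc_void)
{
	ucontext_t *uc = uc_void;

	(void)sig;
	printf("warning hit at %p, skipping the UD0\n", info->si_addr);
	uc->uc_mcontext.gregs[REG_RIP] += LEN_UD0;	/* resume after it */
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = sigill_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGILL, &sa, NULL);

	asm volatile(".byte 0x0f, 0xff");	/* UD0 */
	puts("execution resumed after the warning");
	return 0;
}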
@@ -1,6 +1,8 @@
 #include <linux/sched.h>
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
+#include <linux/interrupt.h>
+#include <asm/sections.h>
 #include <asm/ptrace.h>
 #include <asm/bitops.h>
 #include <asm/stacktrace.h>
@@ -23,53 +25,53 @@
-static void unwind_dump(struct unwind_state *state, unsigned long *sp)
+static void unwind_dump(struct unwind_state *state)
-	printk_deferred("unwind stack type:%d next_sp:%p mask:%lx graph_idx:%d\n",
+	printk_deferred("unwind stack type:%d next_sp:%p mask:0x%lx graph_idx:%d\n",
-	for (sp = state->orig_sp; sp < state->stack_info.end; sp++) {
+	for (sp = state->orig_sp; sp; sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
+		if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
+			break;
+		for (; sp < stack_info.end; sp++) {
[... the dump now walks every stack via get_stack_info(), zero-pads words
     with BITS_PER_LONG/4 instead of the fixed %016 width, and keeps the
     existing zero-run compression ...]
 
 unsigned long unwind_get_return_address(struct unwind_state *state)
 {
-	unsigned long addr;
-	unsigned long *addr_p = unwind_get_return_address_ptr(state);
-
 	if (unwind_done(state))
 		return 0;
 
-	if (state->regs && user_mode(state->regs))
-		return 0;
-
-	addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
-	addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, addr,
-				     addr_p);
-
-	return __kernel_text_address(addr) ? addr : 0;
+	return __kernel_text_address(state->ip) ? state->ip : 0;
 }
@@ -82,16 +84,41 @@ static size_t regs_size(struct pt_regs *regs)
+static bool in_entry_code(unsigned long ip)
+{
+	char *addr = (char *)ip;
+
+	if (addr >= __entry_text_start && addr < __entry_text_end)
+		return true;
+
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
+	if (addr >= __irqentry_text_start && addr < __irqentry_text_end)
+		return true;
+#endif
+
+	return false;
+}
+
+static inline unsigned long *last_frame(struct unwind_state *state)
+{
+	return (unsigned long *)task_pt_regs(state->task) - 2;
+}
+
+static inline unsigned long *last_aligned_frame(struct unwind_state *state)
+{
+	return last_frame(state) - GCC_REALIGN_WORDS;
+}
+
 static bool is_last_task_frame(struct unwind_state *state)
 {
-	unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2;
-	unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS;
+	unsigned long *last_bp = last_frame(state);
+	unsigned long *aligned_bp = last_aligned_frame(state);
@@ -135,26 +162,70 @@ static struct pt_regs *decode_frame_pointer(unsigned long *bp)
-static bool update_stack_state(struct unwind_state *state, void *addr,
-			       size_t len)
+static bool update_stack_state(struct unwind_state *state,
+			       unsigned long *next_bp)
[... update_stack_state() now decodes the frame pointer itself (plain frame
     vs. encoded pt_regs pointer), rejects frames that unwind downward or
     overlap the previous one, updates state->regs/state->bp, reads the
     return address into state->ip via READ_ONCE_TASK_STACK() and
     ftrace_graph_ret_addr(), sets got_irq for pt_regs frames, and records
     orig_sp for unwind_dump() ...]
@@ -162,14 +233,12 @@
 bool unwind_next_frame(struct unwind_state *state)
 {
 	struct pt_regs *regs;
-	unsigned long *next_bp, *next_frame;
-	size_t next_len;
-	enum stack_type prev_type = state->stack_info.type;
+	unsigned long *next_bp;
@@ -197,54 +266,19 @@
 	if (state->regs)
 		next_bp = (unsigned long *)state->regs->bp;
 	else
-		next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task,*state->bp);
+		next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);
 
-[... the frame decoding, accessibility and overlap checks are removed from
-     this function ...]
+	/* Move to the next frame if it's safe: */
+	if (!update_stack_state(state, next_bp))
+		goto bad_address;
 
 	return true;
@@ -259,18 +293,29 @@ bad_address:
 	if (state->task != current)
 		goto the_end;
 
+	/*
+	 * Don't warn if the unwinder got lost due to an interrupt in entry
+	 * code or in the C handler before the first frame pointer got set up:
+	 */
+	if (state->got_irq && in_entry_code(state->ip))
+		goto the_end;
+	if (state->regs &&
+	    state->regs->sp >= (unsigned long)last_aligned_frame(state) &&
+	    state->regs->sp < (unsigned long)task_pt_regs(state->task))
+		goto the_end;
+
 	if (state->regs) {
 		printk_deferred_once(KERN_WARNING
 			"WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
-			state->regs, state->task->comm, state->task->pid, next_frame);
-		unwind_dump(state, (unsigned long *)state->regs);
+			state->regs, state->task->comm, state->task->pid, next_bp);
+		unwind_dump(state);
 	} else {
 		printk_deferred_once(KERN_WARNING
 			"WARNING: kernel stack frame pointer at %p in %s:%d has bad value %p\n",
-			state->bp, state->task->comm, state->task->pid, next_frame);
-		unwind_dump(state, state->bp);
+			state->bp, state->task->comm, state->task->pid, next_bp);
+		unwind_dump(state);
 	}
@@ -281,35 +326,24 @@ EXPORT_SYMBOL_GPL(unwind_next_frame);
 void __unwind_start(struct unwind_state *state, struct task_struct *task,
 		    struct pt_regs *regs, unsigned long *first_frame)
 {
-	unsigned long *bp, *frame;
-	size_t len;
+	unsigned long *bp;
 
 	memset(state, 0, sizeof(*state));
 	state->task = task;
+	state->got_irq = (regs);
 
 	bp = get_frame_pointer(task, regs);
-[... the explicit pt_regs decoding and frame/len setup are dropped here ...]
-	get_stack_info(frame, state->task, &state->stack_info, &state->stack_mask);
-	update_stack_state(state, frame, len);
+	/* Initialize stack info and make sure the frame data is accessible: */
+	get_stack_info(bp, state->task, &state->stack_info, &state->stack_mask);
+	update_stack_state(state, bp);
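The frame-pointer unwinder above walks frames whose header is a saved bp followed by the return address. A minimal userspace sketch of that walk, assuming frame pointers are kept (compile with -fno-omit-frame-pointer) and stopping at a NULL bp instead of the pt_regs and entry-code checks the kernel code does:

#include <stdint.h>
#include <stdio.h>

static void dump_stack_trace(void)
{
	uintptr_t *bp = __builtin_frame_address(0);

	while (bp) {
		uintptr_t ret = bp[1];		/* return address sits above the saved bp */

		if (!ret)
			break;
		printf("bp=%p  ret=%#lx\n", (void *)bp, (unsigned long)ret);
		bp = (uintptr_t *)bp[0];	/* follow the saved-bp chain */
	}
}

static void level2(void) { dump_stack_trace(); }
static void level1(void) { level2(); }

int main(void)
{
	level1();
	return 0;
}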
@@ -34,7 +34,7 @@ bool unwind_next_frame(struct unwind_state *state)
 			return true;
 		}
 
-		state->sp = info->next_sp;
+		state->sp = PTR_ALIGN(info->next_sp, sizeof(long));
 
 	} while (!get_stack_info(state->sp, state->task, info,
 				 &state->stack_mask));
@@ -49,7 +49,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 	memset(state, 0, sizeof(*state));
 
 	state->task = task;
-	state->sp   = first_frame;
+	state->sp   = PTR_ALIGN(first_frame, sizeof(long));
 
 	get_stack_info(first_frame, state->task, &state->stack_info,
 		       &state->stack_mask);
@@ -146,6 +146,7 @@ SECTIONS
 		_edata = .;
 	} :data
 
+	BUG_TABLE
 
 	. = ALIGN(PAGE_SIZE);
 	__vvar_page = .;
@ -14,20 +14,15 @@
|
|||||||
* Zero a page.
|
* Zero a page.
|
||||||
* %rdi - page
|
* %rdi - page
|
||||||
*/
|
*/
|
||||||
ENTRY(clear_page)
|
ENTRY(clear_page_rep)
|
||||||
|
|
||||||
ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
|
|
||||||
"jmp clear_page_c_e", X86_FEATURE_ERMS
|
|
||||||
|
|
||||||
movl $4096/8,%ecx
|
movl $4096/8,%ecx
|
||||||
xorl %eax,%eax
|
xorl %eax,%eax
|
||||||
rep stosq
|
rep stosq
|
||||||
ret
|
ret
|
||||||
ENDPROC(clear_page)
|
ENDPROC(clear_page_rep)
|
||||||
EXPORT_SYMBOL(clear_page)
|
EXPORT_SYMBOL_GPL(clear_page_rep)
|
||||||
|
|
||||||
ENTRY(clear_page_orig)
|
ENTRY(clear_page_orig)
|
||||||
|
|
||||||
xorl %eax,%eax
|
xorl %eax,%eax
|
||||||
movl $4096/64,%ecx
|
movl $4096/64,%ecx
|
||||||
.p2align 4
|
.p2align 4
|
||||||
@@ -47,10 +42,12 @@ ENTRY(clear_page_orig)
 	nop
 	ret
 ENDPROC(clear_page_orig)
+EXPORT_SYMBOL_GPL(clear_page_orig)
 
-ENTRY(clear_page_c_e)
+ENTRY(clear_page_erms)
 	movl $4096,%ecx
 	xorl %eax,%eax
 	rep stosb
 	ret
-ENDPROC(clear_page_c_e)
+ENDPROC(clear_page_erms)
+EXPORT_SYMBOL_GPL(clear_page_erms)
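With this pair of hunks the ALTERNATIVE_2 dispatch disappears from the assembly and the three clearing routines (clear_page_rep, clear_page_orig, clear_page_erms) are exported individually; picking among them is presumably left to a C-level clear_page() wrapper patched through the alternatives mechanism elsewhere in the series, which is not shown in this diff. The sketch below is a hedged user-space analogue of that selection: plain branches stand in for boot-time patching, and memset() stands in for the real rep stosq, unrolled-store, and rep stosb bodies.

#include <stdbool.h>
#include <string.h>

#define DEMO_PAGE_SIZE 4096

static void clear_page_orig(void *page) { memset(page, 0, DEMO_PAGE_SIZE); } /* unrolled 64-byte stores */
static void clear_page_rep(void *page)  { memset(page, 0, DEMO_PAGE_SIZE); } /* rep stosq */
static void clear_page_erms(void *page) { memset(page, 0, DEMO_PAGE_SIZE); } /* rep stosb (ERMS) */

/* In the kernel, alternatives patching reduces this decision to a single call site. */
static void demo_clear_page(void *page, bool have_rep_good, bool have_erms)
{
	if (have_erms)
		clear_page_erms(page);
	else if (have_rep_good)
		clear_page_rep(page);
	else
		clear_page_orig(page);
}

int main(void)
{
	static char page[DEMO_PAGE_SIZE];

	demo_clear_page(page, true, false);
	return 0;
}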
@@ -8,7 +8,7 @@ else
 	BITS := 64
 endif
 
-obj-y = bug.o bugs_$(BITS).o delay.o fault.o ldt.o \
+obj-y = bugs_$(BITS).o delay.o fault.o ldt.o \
 	ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal.o \
 	stub_$(BITS).o stub_segv.o \
 	sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o \
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2006 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL V2
- */
-
-#include <linux/uaccess.h>
-
-/*
- * Mostly copied from i386/x86_86 - eliminated the eip < PAGE_OFFSET because
- * that's not relevant in skas mode.
- */
-
-int is_valid_bugaddr(unsigned long eip)
-{
-	unsigned short ud2;
-
-	if (probe_kernel_address((unsigned short __user *)eip, ud2))
-		return 0;
-
-	return ud2 == 0x0b0f;
-}
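The deleted UML file duplicated the check that a trapping address really points at a BUG opcode; after this series UML can rely on the shared x86 implementation instead. The sketch below reconstructs that check from memory, so treat it as approximate rather than a copy of the tree: the UD2 opcode 0x0f 0x0b reads as the little-endian short 0x0b0f, exactly the value the deleted file tested for, and this series additionally emits UD0 (0x0f 0xff) for WARN(). The demo_* names and the probe stand-in are assumptions for the example.

#include <stdint.h>
#include <string.h>

#define DEMO_INSN_UD2	0x0b0f	/* 0x0f 0x0b, read as a little-endian short */
#define DEMO_INSN_UD0	0xff0f	/* 0x0f 0xff, used for WARN() in this series */

/* Stand-in for probe_kernel_address(): a fault-safe two-byte read of text. */
static int demo_probe_read(const void *addr, uint16_t *val)
{
	memcpy(val, addr, sizeof(*val));
	return 0;
}

static int demo_is_valid_bugaddr(const void *addr)
{
	uint16_t insn;

	if (demo_probe_read(addr, &insn))
		return 0;

	return insn == DEMO_INSN_UD2 || insn == DEMO_INSN_UD0;
}

int main(void)
{
	/* Assumes a little-endian host, like x86. */
	static const unsigned char ud2[] = { 0x0f, 0x0b };

	return !demo_is_valid_bugaddr(ud2);	/* exit 0 when recognised */
}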
@@ -5,7 +5,9 @@
 
 #ifdef CONFIG_GENERIC_BUG
 #define BUGFLAG_WARNING		(1 << 0)
-#define BUGFLAG_TAINT(taint)	(BUGFLAG_WARNING | ((taint) << 8))
+#define BUGFLAG_ONCE		(1 << 1)
+#define BUGFLAG_DONE		(1 << 2)
+#define BUGFLAG_TAINT(taint)	((taint) << 8)
 #define BUG_GET_TAINT(bug)	((bug)->flags >> 8)
 #endif
 
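A short worked example of the flag layout introduced above: the status bits (WARNING, ONCE, DONE) occupy the low bits of the entry's flags word and the taint value is shifted into bits 8 and up. The BUGFLAG_* values are taken from the hunk; the TAINT_WARN value (9) is the one the kernel uses for WARN taint, quoted from memory.

#include <assert.h>

#define BUGFLAG_WARNING		(1 << 0)
#define BUGFLAG_ONCE		(1 << 1)
#define BUGFLAG_DONE		(1 << 2)
#define BUGFLAG_TAINT(taint)	((taint) << 8)

#define TAINT_WARN		9	/* kernel value for WARN taint, from memory */

int main(void)
{
	unsigned short flags = BUGFLAG_WARNING | BUGFLAG_ONCE |
			       BUGFLAG_TAINT(TAINT_WARN);

	assert((flags >> 8) == TAINT_WARN);	/* taint recovered from the high bits */
	assert(flags & BUGFLAG_ONCE);
	assert(!(flags & BUGFLAG_DONE));	/* set later by report_bug() */
	return 0;
}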
@@ -55,6 +57,18 @@ struct bug_entry {
 #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
 #endif
 
+#ifdef __WARN_FLAGS
+#define __WARN_TAINT(taint)		__WARN_FLAGS(BUGFLAG_TAINT(taint))
+#define __WARN_ONCE_TAINT(taint)	__WARN_FLAGS(BUGFLAG_ONCE|BUGFLAG_TAINT(taint))
+
+#define WARN_ON_ONCE(condition) ({				\
+	int __ret_warn_on = !!(condition);			\
+	if (unlikely(__ret_warn_on))				\
+		__WARN_ONCE_TAINT(TAINT_WARN);			\
+	unlikely(__ret_warn_on);				\
+})
+#endif
+
 /*
  * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
  * significant issues that need prompt attention if they should ever
@@ -97,7 +111,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 #endif
 
 #ifndef WARN
 #define WARN(condition, format...) ({				\
 	int __ret_warn_on = !!(condition);			\
 	if (unlikely(__ret_warn_on))				\
 		__WARN_printf(format);				\
@@ -112,6 +126,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 	unlikely(__ret_warn_on);				\
 })
 
+#ifndef WARN_ON_ONCE
 #define WARN_ON_ONCE(condition) ({				\
 	static bool __section(.data.unlikely) __warned;		\
 	int __ret_warn_once = !!(condition);			\
@@ -122,6 +137,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 	}							\
 	unlikely(__ret_warn_once);				\
 })
+#endif
 
 #define WARN_ONCE(condition, format...) ({			\
 	static bool __section(.data.unlikely) __warned;		\
@@ -287,8 +287,6 @@
 		*(.rodata1)					\
 	}							\
 								\
-	BUG_TABLE						\
-								\
 	/* PCI quirks */					\
 	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;	\
@@ -857,7 +855,8 @@
 		READ_MOSTLY_DATA(cacheline)			\
 		DATA_DATA					\
 		CONSTRUCTORS					\
-	}
+	}							\
+	BUG_TABLE
 
 #define INIT_TEXT_SECTION(inittext_align)			\
 	. = ALIGN(inittext_align);				\
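These two vmlinux.lds.h hunks move BUG_TABLE out of the read-only rodata area and append it to the writable data section. That relocation, like the const removals in the lib/bug.c hunks below, follows from report_bug() now writing BUGFLAG_DONE back into the table entries at runtime. A minimal illustration with a stand-in entry type (demo_* names are assumptions, not the kernel's):

#define DEMO_BUGFLAG_DONE (1 << 2)

struct demo_bug_entry { unsigned short flags; };

/* Would not compile against a const entry, and would fault on a read-only mapping. */
static void demo_mark_done(struct demo_bug_entry *bug)
{
	bug->flags |= DEMO_BUGFLAG_DONE;
}

int main(void)
{
	static struct demo_bug_entry entry;	/* must live in writable data */

	demo_mark_done(&entry);
	return 0;
}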
@@ -105,7 +105,7 @@ static inline int is_warning_bug(const struct bug_entry *bug)
 	return bug->flags & BUGFLAG_WARNING;
 }
 
-const struct bug_entry *find_bug(unsigned long bugaddr);
+struct bug_entry *find_bug(unsigned long bugaddr);
 
 enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
 
lib/bug.c | 28
@@ -47,7 +47,7 @@
 #include <linux/sched.h>
 #include <linux/rculist.h>
 
-extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
+extern struct bug_entry __start___bug_table[], __stop___bug_table[];
 
 static inline unsigned long bug_addr(const struct bug_entry *bug)
 {
@@ -62,10 +62,10 @@ static inline unsigned long bug_addr(const struct bug_entry *bug)
 /* Updates are protected by module mutex */
 static LIST_HEAD(module_bug_list);
 
-static const struct bug_entry *module_find_bug(unsigned long bugaddr)
+static struct bug_entry *module_find_bug(unsigned long bugaddr)
 {
 	struct module *mod;
-	const struct bug_entry *bug = NULL;
+	struct bug_entry *bug = NULL;
 
 	rcu_read_lock_sched();
 	list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
@@ -122,15 +122,15 @@ void module_bug_cleanup(struct module *mod)
 
 #else
 
-static inline const struct bug_entry *module_find_bug(unsigned long bugaddr)
+static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
 {
 	return NULL;
 }
 #endif
 
-const struct bug_entry *find_bug(unsigned long bugaddr)
+struct bug_entry *find_bug(unsigned long bugaddr)
 {
-	const struct bug_entry *bug;
+	struct bug_entry *bug;
 
 	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
 		if (bugaddr == bug_addr(bug))
@@ -141,9 +141,9 @@ const struct bug_entry *find_bug(unsigned long bugaddr)
 
 enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 {
-	const struct bug_entry *bug;
+	struct bug_entry *bug;
 	const char *file;
-	unsigned line, warning;
+	unsigned line, warning, once, done;
 
 	if (!is_valid_bugaddr(bugaddr))
 		return BUG_TRAP_TYPE_NONE;
@@ -164,6 +164,18 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 		line = bug->line;
 #endif
 		warning = (bug->flags & BUGFLAG_WARNING) != 0;
+		once = (bug->flags & BUGFLAG_ONCE) != 0;
+		done = (bug->flags & BUGFLAG_DONE) != 0;
+
+		if (warning && once) {
+			if (done)
+				return BUG_TRAP_TYPE_WARN;
+
+			/*
+			 * Since this is the only store, concurrency is not an issue.
+			 */
+			bug->flags |= BUGFLAG_DONE;
+		}
 	}
 
 	if (warning) {
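A hedged, self-contained analogue of the once/done handling added above: the first trap at a BUGFLAG_ONCE warning site reports and latches BUGFLAG_DONE in its table entry, and every later trap at the same site returns early. The entry type and report function here are stand-ins, not the kernel's.

#include <stdio.h>

#define BUGFLAG_WARNING	(1 << 0)
#define BUGFLAG_ONCE	(1 << 1)
#define BUGFLAG_DONE	(1 << 2)

struct demo_bug_entry { unsigned short flags; };

static int demo_report_bug(struct demo_bug_entry *bug)
{
	int warning = (bug->flags & BUGFLAG_WARNING) != 0;
	int once    = (bug->flags & BUGFLAG_ONCE) != 0;
	int done    = (bug->flags & BUGFLAG_DONE) != 0;

	if (warning && once) {
		if (done)
			return 0;		/* already warned once: stay silent */
		bug->flags |= BUGFLAG_DONE;	/* only store, so no concurrency issue */
	}

	if (warning)
		printf("WARNING at this site\n");
	return warning;
}

int main(void)
{
	struct demo_bug_entry e = { .flags = BUGFLAG_WARNING | BUGFLAG_ONCE };

	demo_report_bug(&e);	/* prints the warning */
	demo_report_bug(&e);	/* silent: DONE is latched */
	return 0;
}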