Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner:
 "A series of fixes for X86:

   - The final fix for the end-of-stack issue in the unwinder

   - Handle non-PAT systems gracefully

   - Prevent access to uninitialized memory

   - Move early delay calibration after basic init

   - Fix Kconfig help text

   - Fix a cross compile issue

   - Unbreak older make versions"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/timers: Move simple_udelay_calibration past init_hypervisor_platform
  x86/alternatives: Prevent uninitialized stack byte read in apply_alternatives()
  x86/PAT: Fix Xorg regression on CPUs that don't support PAT
  x86/watchdog: Fix Kconfig help text file path reference to lockup watchdog documentation
  x86/build: Permit building with old make versions
  x86/unwind: Add end-of-stack check for ftrace handlers
  Revert "x86/entry: Fix the end of the stack for newly forked tasks"
  x86/boot: Use CROSS_COMPILE prefix for readelf
commit 38e6bf238d
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -360,7 +360,7 @@ config SMP
 	  Management" code will be disabled if you say Y here.
 
 	  See also <file:Documentation/x86/i386/IO-APIC.txt>,
-	  <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+	  <file:Documentation/lockup-watchdogs.txt> and the SMP-HOWTO available at
 	  <http://www.tldp.org/docs.html#howto>.
 
 	  If you don't know what to do here, say N.
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -159,7 +159,7 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	# If '-Os' is enabled, disable it and print a warning.
         ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-          undefine CONFIG_CC_OPTIMIZE_FOR_SIZE
+          CONFIG_CC_OPTIMIZE_FOR_SIZE :=
           $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE. Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.)
         endif
 
     endif
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -94,7 +94,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
 quiet_cmd_check_data_rel = DATAREL $@
 define cmd_check_data_rel
 	for obj in $(filter %.o,$^); do \
-		readelf -S $$obj | grep -qF .rel.local && { \
+		${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \
 			echo "error: $$obj has data relocations!" >&2; \
 			exit 1; \
 		} || true; \
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -251,6 +251,23 @@ ENTRY(__switch_to_asm)
 	jmp	__switch_to
 END(__switch_to_asm)
 
+/*
+ * The unwinder expects the last frame on the stack to always be at the same
+ * offset from the end of the page, which allows it to validate the stack.
+ * Calling schedule_tail() directly would break that convention because its an
+ * asmlinkage function so its argument has to be pushed on the stack. This
+ * wrapper creates a proper "end of stack" frame header before the call.
+ */
+ENTRY(schedule_tail_wrapper)
+	FRAME_BEGIN
+
+	pushl	%eax
+	call	schedule_tail
+	popl	%eax
+
+	FRAME_END
+	ret
+ENDPROC(schedule_tail_wrapper)
 /*
  * A newly forked process directly context switches into this address.
 *
@@ -259,24 +276,15 @@ END(__switch_to_asm)
  * edi: kernel thread arg
  */
 ENTRY(ret_from_fork)
-	FRAME_BEGIN		/* help unwinder find end of stack */
-
-	/*
-	 * schedule_tail() is asmlinkage so we have to put its 'prev' argument
-	 * on the stack.
-	 */
-	pushl	%eax
-	call	schedule_tail
-	popl	%eax
+	call	schedule_tail_wrapper
 
 	testl	%ebx, %ebx
 	jnz	1f		/* kernel threads are uncommon */
 
 2:
 	/* When we fork, we trace the syscall return in the child, too. */
-	leal	FRAME_OFFSET(%esp), %eax
+	movl	%esp, %eax
 	call	syscall_return_slowpath
-	FRAME_END
 	jmp	restore_all
 
 	/* kernel thread */
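
The wrapper above exists because the frame-pointer unwinder validates a stack by checking that the outermost frame sits at a fixed offset from the task's pt_regs. A toy user-space model of that invariant (hypothetical names and layout, not the kernel's actual code) shows why pushing schedule_tail()'s argument directly in ret_from_fork would break the check:

  #include <stdbool.h>
  #include <stdio.h>

  #define STACK_WORDS 16

  static unsigned long stack[STACK_WORDS];	/* simulated task stack */

  /* In the kernel, pt_regs lives at the top of the task stack and the last
   * frame must sit exactly two words below it (task_pt_regs() - 2). */
  static unsigned long *task_pt_regs_model(void)
  {
  	return &stack[STACK_WORDS];
  }

  static bool is_last_frame_model(unsigned long *bp)
  {
  	return bp == task_pt_regs_model() - 2;
  }

  int main(void)
  {
  	/* A frame at the canonical offset validates... */
  	printf("%d\n", is_last_frame_model(&stack[STACK_WORDS - 2]));
  	/* ...but one extra argument word pushed before the frame header
  	 * (what calling an asmlinkage schedule_tail() directly would do)
  	 * shifts it, and the unwinder reports an unreliable stack. */
  	printf("%d\n", is_last_frame_model(&stack[STACK_WORDS - 3]));
  	return 0;
  }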
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -36,7 +36,6 @@
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
-#include <asm/frame.h>
 #include <linux/err.h>
 
 .code64
@@ -406,19 +405,17 @@ END(__switch_to_asm)
  * r12: kernel thread arg
  */
 ENTRY(ret_from_fork)
-	FRAME_BEGIN			/* help unwinder find end of stack */
 	movq	%rax, %rdi
 	call	schedule_tail		/* rdi: 'prev' task parameter */
 
 	testq	%rbx, %rbx		/* from kernel_thread? */
 	jnz	1f			/* kernel threads are uncommon */
 
 2:
-	leaq	FRAME_OFFSET(%rsp),%rdi	/* pt_regs pointer */
+	movq	%rsp, %rdi
 	call	syscall_return_slowpath	/* returns with IRQs disabled */
 	TRACE_IRQS_ON			/* user mode is traced as IRQS on */
 	SWAPGS
-	FRAME_END
 	jmp	restore_regs_and_iret
 
 1:
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -409,8 +409,13 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 		memcpy(insnbuf, replacement, a->replacementlen);
 		insnbuf_sz = a->replacementlen;
 
-		/* 0xe8 is a relative jump; fix the offset. */
-		if (*insnbuf == 0xe8 && a->replacementlen == 5) {
+		/*
+		 * 0xe8 is a relative jump; fix the offset.
+		 *
+		 * Instruction length is checked before the opcode to avoid
+		 * accessing uninitialized bytes for zero-length replacements.
+		 */
+		if (a->replacementlen == 5 && *insnbuf == 0xe8) {
 			*(s32 *)(insnbuf + 1) += replacement - instr;
 			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
 				*(s32 *)(insnbuf + 1),
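
The reordered condition above relies on C's short-circuit evaluation: when a->replacementlen is 0, nothing was copied into insnbuf, so the opcode byte must not be inspected first. A minimal user-space sketch of the same idiom (hypothetical names, not the kernel code):

  #include <stdio.h>
  #include <stddef.h>

  static int needs_call_fixup(const unsigned char *buf, size_t len)
  {
  	/* Length check first: with '&&', buf[0] is never read when len != 5. */
  	return len == 5 && buf[0] == 0xe8;
  }

  int main(void)
  {
  	unsigned char insnbuf[16];	/* deliberately left uninitialized */
  	size_t replacementlen = 0;	/* a zero-length alternative */

  	/* The old order -- if (buf[0] == 0xe8 && len == 5) -- would read an
  	 * uninitialized stack byte here; the new order never touches it. */
  	printf("fixup needed: %d\n", needs_call_fixup(insnbuf, replacementlen));
  	return 0;
  }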
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -980,8 +980,6 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	x86_configure_nx();
 
-	simple_udelay_calibration();
-
 	parse_early_param();
 
 #ifdef CONFIG_MEMORY_HOTPLUG
@@ -1041,6 +1039,8 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	init_hypervisor_platform();
 
+	simple_udelay_calibration();
+
 	x86_init.resources.probe_roms();
 
 	/* after parse_early_param, so could debug it */
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -104,6 +104,11 @@ static inline unsigned long *last_frame(struct unwind_state *state)
 	return (unsigned long *)task_pt_regs(state->task) - 2;
 }
 
+static bool is_last_frame(struct unwind_state *state)
+{
+	return state->bp == last_frame(state);
+}
+
 #ifdef CONFIG_X86_32
 #define GCC_REALIGN_WORDS 3
 #else
@@ -115,16 +120,15 @@ static inline unsigned long *last_aligned_frame(struct unwind_state *state)
 	return last_frame(state) - GCC_REALIGN_WORDS;
 }
 
-static bool is_last_task_frame(struct unwind_state *state)
+static bool is_last_aligned_frame(struct unwind_state *state)
 {
 	unsigned long *last_bp = last_frame(state);
 	unsigned long *aligned_bp = last_aligned_frame(state);
 
 	/*
-	 * We have to check for the last task frame at two different locations
-	 * because gcc can occasionally decide to realign the stack pointer and
-	 * change the offset of the stack frame in the prologue of a function
-	 * called by head/entry code. Examples:
+	 * GCC can occasionally decide to realign the stack pointer and change
+	 * the offset of the stack frame in the prologue of a function called
+	 * by head/entry code. Examples:
 	 *
 	 * <start_secondary>:
 	 *   push   %edi
@@ -141,11 +145,38 @@ static bool is_last_task_frame(struct unwind_state *state)
 	 *   push   %rbp
 	 *   mov    %rsp,%rbp
 	 *
-	 * Note that after aligning the stack, it pushes a duplicate copy of
-	 * the return address before pushing the frame pointer.
+	 * After aligning the stack, it pushes a duplicate copy of the return
+	 * address before pushing the frame pointer.
 	 */
-	return (state->bp == last_bp ||
-		(state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
+	return (state->bp == aligned_bp && *(aligned_bp + 1) == *(last_bp + 1));
+}
+
+static bool is_last_ftrace_frame(struct unwind_state *state)
+{
+	unsigned long *last_bp = last_frame(state);
+	unsigned long *last_ftrace_bp = last_bp - 3;
+
+	/*
+	 * When unwinding from an ftrace handler of a function called by entry
+	 * code, the stack layout of the last frame is:
+	 *
+	 *   bp
+	 *   parent ret addr
+	 *   bp
+	 *   function ret addr
+	 *   parent ret addr
+	 *   pt_regs
+	 *   -----------------
+	 */
+	return (state->bp == last_ftrace_bp &&
+		*state->bp == *(state->bp + 2) &&
+		*(state->bp + 1) == *(state->bp + 4));
+}
+
+static bool is_last_task_frame(struct unwind_state *state)
+{
+	return is_last_frame(state) || is_last_aligned_frame(state) ||
+	       is_last_ftrace_frame(state);
 }
 
 /*
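
The ftrace check above works purely by pattern: the handler's frame sits three words below the normal end-of-stack frame, and both the saved bp and the parent return address appear twice in that layout. A small user-space model of the comparison (hypothetical values, assuming the word layout in the comment):

  #include <stdbool.h>
  #include <stdio.h>

  static bool is_ftrace_last_frame(const unsigned long *bp,
  				 const unsigned long *last_bp)
  {
  	return bp == last_bp - 3 &&
  	       bp[0] == bp[2] &&	/* saved bp is duplicated */
  	       bp[1] == bp[4];		/* parent ret addr is duplicated */
  }

  int main(void)
  {
  	unsigned long stack[6];
  	unsigned long *last_bp = &stack[3];	/* where last_frame() would point */

  	stack[0] = 0x1000;	/* bp */
  	stack[1] = 0x2000;	/* parent ret addr */
  	stack[2] = 0x1000;	/* bp (duplicate) */
  	stack[3] = 0x3000;	/* function ret addr */
  	stack[4] = 0x2000;	/* parent ret addr (duplicate) */

  	printf("ftrace last frame: %d\n", is_ftrace_last_frame(&stack[0], last_bp));
  	return 0;
  }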
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -65,9 +65,11 @@ static int __init nopat(char *str)
 }
 early_param("nopat", nopat);
 
+static bool __read_mostly __pat_initialized = false;
+
 bool pat_enabled(void)
 {
-	return !!__pat_enabled;
+	return __pat_initialized;
 }
 EXPORT_SYMBOL_GPL(pat_enabled);
 
@@ -225,13 +227,14 @@ static void pat_bsp_init(u64 pat)
 	}
 
 	wrmsrl(MSR_IA32_CR_PAT, pat);
+	__pat_initialized = true;
 
 	__init_cache_modes(pat);
 }
 
 static void pat_ap_init(u64 pat)
 {
-	if (!boot_cpu_has(X86_FEATURE_PAT)) {
+	if (!this_cpu_has(X86_FEATURE_PAT)) {
 		/*
 		 * If this happens we are on a secondary CPU, but switched to
 		 * PAT on the boot CPU. We have no way to undo PAT.
@@ -306,7 +309,7 @@ void pat_init(void)
 	u64 pat;
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 
-	if (!pat_enabled()) {
+	if (!__pat_enabled) {
 		init_cache_modes();
 		return;
 	}
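
The net effect of the pat.c changes: pat_enabled() no longer reports the boot-time request (__pat_enabled, cleared by "nopat"), but whether the PAT MSR was actually programmed, which never happens on CPUs lacking the PAT feature. A toy user-space model of that distinction (hypothetical names and values, not the kernel logic):

  #include <stdbool.h>
  #include <stdio.h>

  static bool pat_requested = true;	/* __pat_enabled: "nopat" not given */
  static bool pat_initialized = false;	/* set only after the MSR write */

  static bool cpu_has_pat(void) { return false; }	/* e.g. an old CPU without PAT */

  static void pat_init_model(void)
  {
  	if (!pat_requested || !cpu_has_pat())
  		return;			/* MSR never written on this CPU */
  	pat_initialized = true;		/* would follow wrmsrl(MSR_IA32_CR_PAT) */
  }

  int main(void)
  {
  	pat_init_model();
  	printf("old pat_enabled(): %d\n", pat_requested);	/* 1: misleading */
  	printf("new pat_enabled(): %d\n", pat_initialized);	/* 0: accurate */
  	return 0;
  }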