x86/asm/entry/32: Clean up entry_32.S
Make the 32-bit syscall entry code a bit more readable:

 - use consistent assembly coding style similar to entry_64.S
 - remove old comments that are not true anymore
 - eliminate whitespace noise
 - use consistent vertical spacing
 - fix various comments

No code changed:

  # arch/x86/entry/entry_32.o:

     text    data     bss     dec     hex filename
     6025       0       0    6025    1789 entry_32.o.before
     6025       0       0    6025    1789 entry_32.o.after

  md5:
     f3fa16b2b0dca804f052deb6b30ba6cb  entry_32.o.before.asm
     f3fa16b2b0dca804f052deb6b30ba6cb  entry_32.o.after.asm

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
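A size and md5 comparison like the one above can be reproduced with standard binutils tools; this is a minimal sketch, and the object path and temporary file names are illustrative assumptions rather than commands taken from the commit:

  # before applying the patch: record section sizes and disassembly
  size arch/x86/entry/entry_32.o
  objdump -d arch/x86/entry/entry_32.o > entry_32.o.before.asm

  # ... apply the patch and rebuild the object ...

  # after applying the patch: sizes and md5 sums must be identical
  size arch/x86/entry/entry_32.o
  objdump -d arch/x86/entry/entry_32.o > entry_32.o.after.asm
  md5sum entry_32.o.before.asm entry_32.o.after.asm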
commit a49976d14f
parent b2502b418e
@@ -1,23 +1,12 @@
/*
*
* Copyright (C) 1991,1992 Linus Torvalds
*/

/*
* entry.S contains the system-call and fault low-level handling routines.
* This also contains the timer-interrupt handler, as well as all interrupts
* and faults that can result in a task-switch.
*
* NOTE: This code handles signal-recognition, which happens every time
* after a timer-interrupt and after each system call.
*
* I changed all the .align's to 4 (16 byte alignment), as that's faster
* on a 486.
* entry_32.S contains the system-call and low-level fault and trap handling routines.
*
* Stack layout in 'syscall_exit':
* ptrace needs to have all regs on the stack.
* if the order here is changed, it needs to be
* updated in fork.c:copy_process, signal.c:do_signal,
* ptrace needs to have all registers on the stack.
* If the order here is changed, it needs to be
* updated in fork.c:copy_process(), signal.c:do_signal(),
* ptrace.c and ptrace.h
*
* 0(%esp) - %ebx
@@ -37,8 +26,6 @@
* 38(%esp) - %eflags
* 3C(%esp) - %oldesp
* 40(%esp) - %oldss
*
* "current" is in register %ebx during any slow entries.
*/

#include <linux/linkage.h>
@@ -303,10 +290,13 @@ need_resched:
END(resume_kernel)
#endif

/* SYSENTER_RETURN points to after the "sysenter" instruction in
the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
/*
* SYSENTER_RETURN points to after the SYSENTER instruction
* in the vsyscall page. See vsyscall-sysentry.S, which defines
* the symbol.
*/

# sysenter call handler stub
# SYSENTER call handler stub
ENTRY(entry_SYSENTER_32)
movl TSS_sysenter_sp0(%esp), %esp
sysenter_past_esp:
@@ -444,9 +434,11 @@ restore_all:
restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
# are returning to the kernel.
# See comments in process.c:copy_thread() for details.
/*
* Warning: PT_OLDSS(%esp) contains the wrong/random values if we
* are returning to the kernel.
* See comments in process.c:copy_thread() for details.
*/
movb PT_OLDSS(%esp), %ah
movb PT_CS(%esp), %al
andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
@@ -501,9 +493,11 @@ ldt_ss:
mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
pushl $__ESPFIX_SS
pushl %eax /* new kernel esp */
/* Disable interrupts, but do not irqtrace this section: we
/*
* Disable interrupts, but do not irqtrace this section: we
* will soon execute iret and the tracer was already set to
* the irqstate after the iret */
* the irqstate after the IRET:
*/
DISABLE_INTERRUPTS(CLBR_EAX)
lss (%esp), %esp /* switch to espfix segment */
jmp restore_nocheck
@@ -680,8 +674,7 @@ ENDPROC(name)


#ifdef CONFIG_TRACING
#define TRACE_BUILD_INTERRUPT(name, nr) \
BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
# define TRACE_BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif
@@ -809,8 +802,10 @@ ENTRY(spurious_interrupt_bug)
END(spurious_interrupt_bug)

#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
entrypoint expects, so fix it up before using the normal path. */
/*
* Xen doesn't set %esp to be precisely what the normal SYSENTER
* entry point expects, so fix it up before using the normal path.
*/
ENTRY(xen_sysenter_target)
addl $5*4, %esp /* remove xen-provided frame */
jmp sysenter_past_esp
@@ -820,11 +815,13 @@ ENTRY(xen_hypervisor_callback)
SAVE_ALL
TRACE_IRQS_OFF

/* Check to see if we got the event in the critical
region in xen_iret_direct, after we've reenabled
events and checked for pending events. This simulates
iret instruction's behaviour where it delivers a
pending interrupt when enabling interrupts. */
/*
* Check to see if we got the event in the critical
* region in xen_iret_direct, after we've reenabled
* events and checked for pending events. This simulates
* iret instruction's behaviour where it delivers a
* pending interrupt when enabling interrupts:
*/
movl PT_EIP(%esp), %eax
cmpl $xen_iret_start_crit, %eax
jb 1f
@@ -842,16 +839,18 @@ ENTRY(xen_do_upcall)
jmp ret_from_intr
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
# 1. Fault while reloading DS, ES, FS or GS
# 2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
/*
* Hypervisor uses this for application faults while it executes.
* We get here for two reasons:
* 1. Fault while reloading DS, ES, FS or GS
* 2. Fault while executing IRET
* Category 1 we fix up by reattempting the load, and zeroing the segment
* register if the load fails.
* Category 2 we fix up by jumping to do_iret_error. We cannot use the
* normal Linux return path in this case because if we use the IRET hypercall
* to pop the stack frame we end up in an infinite loop of failsafe callbacks.
* We distinguish between categories by maintaining a status value in EAX.
*/
ENTRY(xen_failsafe_callback)
pushl %eax
movl $1, %eax
@@ -1169,7 +1168,8 @@ ENTRY(nmi)
je nmi_stack_fixup
pushl %eax
movl %esp, %eax
/* Do not access memory above the end of our stack page,
/*
* Do not access memory above the end of our stack page,
* it might not exist.
*/
andl $(THREAD_SIZE-1), %eax
@@ -1246,4 +1246,3 @@ ENTRY(async_page_fault)
jmp error_code
END(async_page_fault)
#endif