/*
 * AT_SYSINFO entry point
 */
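
/*
 * The address of __kernel_vsyscall is what the kernel advertises to
 * 32-bit userspace in the AT_SYSINFO auxiliary vector entry, so the
 * C library can issue system calls through it without knowing which
 * fast entry instruction (if any) the CPU supports.
 */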

#include <asm/dwarf2.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>

/*
 * First get the common code for the sigreturn entry points.
 * This must come first.
 */
#include "sigreturn.S"

	.text
	.globl __kernel_vsyscall
	.type __kernel_vsyscall,@function
	ALIGN
__kernel_vsyscall:
	CFI_STARTPROC
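	/*
	 * CFI_STARTPROC and the CFI_* annotations below come from
	 * <asm/dwarf2.h>; they emit DWARF call-frame information so that
	 * unwinders and debuggers can unwind through this code even though
	 * it sets up no frame pointer.
	 */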
	/*
	 * Reshuffle regs so that any of the entry instructions will
	 * preserve enough state.
	 */
	pushl	%edx
	CFI_ADJUST_CFA_OFFSET	4
	CFI_REL_OFFSET		edx, 0
	pushl	%ecx
	CFI_ADJUST_CFA_OFFSET	4
	CFI_REL_OFFSET		ecx, 0
	movl	%esp, %ecx
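
	/*
	 * %edx and %ecx are saved above because the fast entry/exit
	 * instruction pairs (SYSENTER/SYSEXIT, SYSCALL/SYSRET) clobber one
	 * or both of them, and %ecx is then loaded with the user stack
	 * pointer.  The kernel side of this convention is expected to take
	 * the user stack pointer from %ecx and reload the real second
	 * syscall argument from the copy saved at the top of that stack.
	 */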

#ifdef CONFIG_X86_64
	/* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
	ALTERNATIVE_2 "", "sysenter", X86_FEATURE_SYSENTER32, \
		      "syscall", X86_FEATURE_SYSCALL32
#else
	ALTERNATIVE "", "sysenter", X86_FEATURE_SEP
#endif
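
	/*
	 * On CPUs without SYSENTER/SYSCALL support, the ALTERNATIVE above
	 * expands to nothing and execution falls through to the legacy
	 * int $0x80 entry below.
	 */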

	/* Enter using int $0x80 */
	movl	(%esp), %ecx	/* reload the original %ecx saved above */
	int	$0x80
GLOBAL(int80_landing_pad)
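
	/*
	 * After a SYSENTER/SYSCALL-based system call the kernel is expected
	 * to return to int80_landing_pad, so the register restore below
	 * runs no matter which entry instruction was used.
	 */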

	/* Restore ECX and EDX in case they were clobbered. */
	popl	%ecx
	CFI_RESTORE		ecx
	CFI_ADJUST_CFA_OFFSET	-4
	popl	%edx
	CFI_RESTORE		edx
	CFI_ADJUST_CFA_OFFSET	-4
	ret
	CFI_ENDPROC

	.size __kernel_vsyscall,.-__kernel_vsyscall
	.previous