/* SPDX-License-Identifier: GPL-2.0 */
/*
 * AT_SYSINFO entry point
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
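
/*
 * Note (added commentary): linux/linkage.h supplies the SYM_* annotation
 * macros, asm/dwarf2.h the CFI_* wrappers around the DWARF .cfi_* unwind
 * directives, asm/cpufeatures.h the X86_FEATURE_* bits, and
 * asm/alternative.h the ALTERNATIVE/ALTERNATIVE_2 boot-time patching
 * macros used below.
 */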

        .text
        .globl __kernel_vsyscall
        .type __kernel_vsyscall,@function
        ALIGN
__kernel_vsyscall:
        CFI_STARTPROC
        /*
         * Reshuffle regs so that any of the entry instructions below
         * will preserve enough state.
         *
         * A really nice entry sequence would be:
         *      pushl %edx
         *      pushl %ecx
         *      movl  %esp, %ecx
         *
         * Unfortunately, naughty Android versions between July and December
         * 2015 actually hardcode the traditional Linux SYSENTER entry
         * sequence.  That is severely broken for a number of reasons (ask
         * anyone with an AMD CPU, for example).  Nonetheless, we try to keep
         * it working approximately as well as it ever worked.
         *
         * This link may elucidate some of the history:
         *   https://android-review.googlesource.com/#/q/Iac3295376d61ef83e713ac9b528f3b50aa780cd7
         * Personally, I find it hard to understand what's going on there.
         *
         * Note to future user developers: DO NOT USE SYSENTER IN YOUR CODE.
         * Execute an indirect call to the address in the AT_SYSINFO auxv
         * entry.  That is the ONLY correct way to make a fast 32-bit system
         * call on Linux.  (Open-coding int $0x80 is also fine, but it's
         * slow.)
         */
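        /*
         * Illustrative sketch only (not part of the original file): the
         * indirect AT_SYSINFO call recommended above, as a 32-bit userspace
         * helper.  It assumes glibc's getauxval() and the usual i386
         * convention of passing the syscall number in %eax and getting the
         * result back in %eax; vsyscall0() is a hypothetical name.
         *
         *      #include <elf.h>
         *      #include <sys/auxv.h>
         *      #include <asm/unistd.h>
         *
         *      static long vsyscall0(long nr)
         *      {
         *              void *vsys = (void *)getauxval(AT_SYSINFO);
         *              long ret;
         *
         *              asm volatile("call *%1"
         *                           : "=a" (ret)
         *                           : "r" (vsys), "0" (nr)
         *                           : "memory", "cc");
         *              return ret;
         *      }
         *
         * Usage: vsyscall0(__NR_getpid) returns the caller's PID.
         */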
        pushl   %ecx
        CFI_ADJUST_CFA_OFFSET   4
        CFI_REL_OFFSET          ecx, 0
        pushl   %edx
        CFI_ADJUST_CFA_OFFSET   4
        CFI_REL_OFFSET          edx, 0
        pushl   %ebp
        CFI_ADJUST_CFA_OFFSET   4
        CFI_REL_OFFSET          ebp, 0
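
        /*
         * Note (added commentary): the three pushes above save exactly what
         * the fast paths can lose -- both SYSENTER_SEQUENCE and
         * SYSCALL_SEQUENCE (defined below) overwrite %ebp, and the kernel's
         * SYSEXIT/SYSRET return paths use %ecx/%edx, so those may not
         * survive the call.
         */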

        #define SYSENTER_SEQUENCE       "movl %esp, %ebp; sysenter"
        #define SYSCALL_SEQUENCE        "movl %ecx, %ebp; syscall"

#ifdef CONFIG_X86_64
        /* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
        ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSENTER32, \
                          SYSCALL_SEQUENCE,  X86_FEATURE_SYSCALL32
#else
        ALTERNATIVE "", SYSENTER_SEQUENCE, X86_FEATURE_SEP
#endif
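
        /*
         * Note (added commentary): ALTERNATIVE/ALTERNATIVE_2 emit only
         * padding here at build time; apply_alternatives() patches in the
         * SYSENTER or SYSCALL sequence at boot when the corresponding
         * feature bit is set.  With neither feature the padding stays as
         * NOPs and execution falls through to the int $0x80 below; on the
         * fast paths the kernel returns straight to int80_landing_pad.
         */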

        /* Enter using int $0x80 */
        int     $0x80
SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL)

        /*
         * Restore EDX and ECX in case they were clobbered.  EBP is not
         * clobbered (the kernel restores it), but it's cleaner and
         * probably faster to pop it than to adjust ESP using addl.
         */
        popl    %ebp
        CFI_RESTORE             ebp
        CFI_ADJUST_CFA_OFFSET   -4
        popl    %edx
        CFI_RESTORE             edx
        CFI_ADJUST_CFA_OFFSET   -4
        popl    %ecx
        CFI_RESTORE             ecx
        CFI_ADJUST_CFA_OFFSET   -4
        ret
        CFI_ENDPROC

        .size __kernel_vsyscall,.-__kernel_vsyscall
        .previous