Pull core x86 updates from Ingo Molnar:

 - The biggest change is the rework of the percpu code, to support the
   'Named Address Spaces' GCC feature, by Uros Bizjak:

      - This allows C code to access GS and FS segment relative memory
        via variables declared with such attributes, which allows the
        compiler to better optimize those accesses than the previous
        inline assembly code (a minimal sketch of the feature follows
        this log).

      - The series also includes a number of micro-optimizations for
        various percpu access methods, plus a number of cleanups of %gs
        accesses in assembly code.

      - These changes have been exposed to linux-next testing for the
        last ~5 months, with no known regressions in this area.

 - Fix/clean up __switch_to()'s broken but accidentally working handling
   of FPU switching - which also generates better code

 - Propagate more RIP-relative addressing in assembly code, to generate
   slightly better code

 - Rework the CPU mitigations Kconfig space to be less idiosyncratic, to
   make it easier for distros to follow & maintain these options

 - Rework the x86 idle code to cure RCU violations and to clean up the
   logic

 - Clean up the vDSO Makefile logic

 - Misc cleanups and fixes

* tag 'x86-core-2024-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
  x86/idle: Select idle routine only once
  x86/idle: Let prefer_mwait_c1_over_halt() return bool
  x86/idle: Cleanup idle_setup()
  x86/idle: Clean up idle selection
  x86/idle: Sanitize X86_BUG_AMD_E400 handling
  sched/idle: Conditionally handle tick broadcast in default_idle_call()
  x86: Increase brk randomness entropy for 64-bit systems
  x86/vdso: Move vDSO to mmap region
  x86/vdso/kbuild: Group non-standard build attributes and primary object file rules together
  x86/vdso: Fix rethunk patching for vdso-image-{32,64}.o
  x86/retpoline: Ensure default return thunk isn't used at runtime
  x86/vdso: Use CONFIG_COMPAT_32 to specify vdso32
  x86/vdso: Use $(addprefix ) instead of $(foreach )
  x86/vdso: Simplify obj-y addition
  x86/vdso: Consolidate targets and clean-files
  x86/bugs: Rename CONFIG_RETHUNK => CONFIG_MITIGATION_RETHUNK
  x86/bugs: Rename CONFIG_CPU_SRSO => CONFIG_MITIGATION_SRSO
  x86/bugs: Rename CONFIG_CPU_IBRS_ENTRY => CONFIG_MITIGATION_IBRS_ENTRY
  x86/bugs: Rename CONFIG_CPU_UNRET_ENTRY => CONFIG_MITIGATION_UNRET_ENTRY
  x86/bugs: Rename CONFIG_SLS => CONFIG_MITIGATION_SLS
  ...
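
For readers unfamiliar with the GCC feature mentioned in the first item
above, here is a minimal, self-contained C sketch of x86 named address
spaces; the typedef and helper are hypothetical names for illustration,
not the kernel's actual percpu accessors:

#ifdef __SEG_GS	/* predefined by GCC (>= 6) when __seg_gs is supported */

/* Pointer whose target lives in the %gs-relative address space. */
typedef unsigned long __seg_gs *gs_ulong_ptr;

static inline unsigned long gs_load(unsigned long offset)
{
	/*
	 * Dereferencing a __seg_gs-qualified pointer makes the compiler
	 * emit a %gs-prefixed load, which it can then cache, combine and
	 * reorder like an ordinary memory access -- something the old
	 * opaque inline-asm percpu accessors prevented.
	 */
	return *(gs_ulong_ptr)offset;
}

#endif /* __SEG_GS */
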
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Common place for both 32- and 64-bit entry routines.
 */

#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/segment.h>
#include <asm/cache.h>

#include "calling.h"

.pushsection .noinstr.text, "ax"

SYM_FUNC_START(entry_ibpb)
	/* Indirect Branch Prediction Barrier: one write to MSR_IA32_PRED_CMD. */
	movl	$MSR_IA32_PRED_CMD, %ecx
	movl	$PRED_CMD_IBPB, %eax
	xorl	%edx, %edx
	wrmsr
	RET
SYM_FUNC_END(entry_ibpb)
/* For KVM */
EXPORT_SYMBOL_GPL(entry_ibpb);

.popsection
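
For reference, the sequence above is roughly the following C-level
operation; the helper name and the standalone constants are illustrative
(the kernel's definitions live in <asm/msr-index.h>), so treat this as a
sketch rather than kernel code:

#include <stdint.h>

#define MSR_IA32_PRED_CMD	0x00000049	/* mirrors <asm/msr-index.h> */
#define PRED_CMD_IBPB		(1u << 0)

/* Must run at CPL0: WRMSR takes the index in %ecx, the value in %edx:%eax. */
static inline void ibpb_sketch(void)
{
	asm volatile("wrmsr"
		     : /* no outputs */
		     : "c" (MSR_IA32_PRED_CMD), "a" (PRED_CMD_IBPB), "d" (0)
		     : "memory");
}

Writing PRED_CMD_IBPB invalidates indirect branch predictions learned
before the barrier on the local CPU, which is why the single MSR write
is all the stub needs to do.
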
/*
 * Define the VERW operand that is disguised as entry code so that
 * it can be referenced with KPTI enabled. This ensures VERW can be
 * used late in the exit-to-user path after page tables are switched.
 */
.pushsection .entry.text, "ax"

.align L1_CACHE_BYTES, 0xcc
SYM_CODE_START_NOALIGN(mds_verw_sel)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR
	.word	__KERNEL_DS
.align L1_CACHE_BYTES, 0xcc
SYM_CODE_END(mds_verw_sel);
/* For KVM */
EXPORT_SYMBOL_GPL(mds_verw_sel);

.popsection
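
For context, C code can consume the selector defined above along these
lines; the helper name is illustrative, and the kernel wraps the real
VERW in an alternatives-patched macro, so this is a sketch only:

/* Declared extern so C code can name the .word emitted in .entry.text. */
extern unsigned short mds_verw_sel;

static inline void clear_cpu_buffers_sketch(void)
{
	/*
	 * VERW with a memory operand loads the selector's access
	 * rights; on affected CPUs with updated microcode it also
	 * flushes CPU buffers. VERW writes ZF, hence the "cc" clobber.
	 */
	asm volatile("verw %[sel]" : : [sel] "m" (mds_verw_sel) : "cc");
}
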
/*
 * Register-preserving thunk (THUNK, from calling.h) around a call to
 * __warn_thunk().
 */
THUNK	warn_thunk_thunk, __warn_thunk