877934769e
Merge tag 'x86_cpu_for_v6.3_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpuid updates from Borislav Petkov:

 - Cache the AMD debug registers in per-CPU variables to avoid MSR
   writes where possible, when supporting a debug registers swap feature
   for SEV-ES guests

 - Add support for AMD's version of eIBRS called Automatic IBRS, which
   is a set-and-forget control of indirect branch restriction
   speculation resources on privilege change

 - Add support for a new x86 instruction - LKGS (Load Kernel GS) - which
   is part of the FRED infrastructure

 - Reset SPEC_CTRL upon init to accommodate use cases like kexec which
   rediscover

 - Other smaller fixes and cleanups

* tag 'x86_cpu_for_v6.3_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/amd: Cache debug register values in percpu variables
  KVM: x86: Propagate the AMD Automatic IBRS feature to the guest
  x86/cpu: Support AMD Automatic IBRS
  x86/cpu, kvm: Add the SMM_CTL MSR not present feature
  x86/cpu, kvm: Add the Null Selector Clears Base feature
  x86/cpu, kvm: Move X86_FEATURE_LFENCE_RDTSC to its native leaf
  x86/cpu, kvm: Add the NO_NESTED_DATA_BP feature
  KVM: x86: Move open-coded CPUID leaf 0x80000021 EAX bit propagation code
  x86/cpu, kvm: Add support for CPUID_80000021_EAX
  x86/gsseg: Add the new <asm/gsseg.h> header to <asm/asm-prototypes.h>
  x86/gsseg: Use the LKGS instruction if available for load_gs_index()
  x86/gsseg: Move load_gs_index() to its own new header file
  x86/gsseg: Make asm_load_gs_index() take an u16
  x86/opcode: Add the LKGS instruction to x86-opcode-map
  x86/cpufeature: Add the CPU feature bit for LKGS
  x86/bugs: Reset speculation control settings on init
  x86/cpu: Remove redundant extern x86_read_arch_cap_msr()
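
The "set-and-forget" nature of Automatic IBRS comes from it being a single EFER bit: once set at boot, the hardware applies the indirect-branch restriction on every privilege change, with no per-transition SPEC_CTRL writes. A minimal sketch of such enablement, assuming the AUTOIBRS feature flag and EFER bit names used in the x86 tree (treat the exact constant names as an assumption):

	/* Sketch: enable AMD Automatic IBRS once at boot; constant names assumed */
	if (boot_cpu_has(X86_FEATURE_AUTOIBRS))
		msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
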
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_DEBUGREG_H
#define _ASM_X86_DEBUGREG_H

#include <linux/bug.h>
#include <linux/percpu.h>
#include <uapi/asm/debugreg.h>

DECLARE_PER_CPU(unsigned long, cpu_dr7);

#ifndef CONFIG_PARAVIRT_XXL
/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)				\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)			\
	native_set_debugreg(register, value)
#endif

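/*
 * Usage sketch (editor's illustration, not part of the original header):
 *
 *	unsigned long dr6;
 *
 *	get_debugreg(dr6, 6);	reads DR6 into dr6
 *	set_debugreg(0UL, 6);	writes 0 into DR6
 *
 * Note the argument order: the variable or value comes first and the
 * debug register number second, in both macros.
 */
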
static __always_inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val));
		break;
	case 7:
		/*
		 * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them
		 * with other code.
		 *
		 * This is needed because a DR7 access can cause a #VC exception
		 * when running under SEV-ES. Taking a #VC exception is not a
		 * safe thing to do just anywhere in the entry code and
		 * re-ordering might place the access into an unsafe location.
		 *
		 * This happened in the NMI handler, where the DR7 read was
		 * re-ordered to happen before the call to sev_es_ist_enter(),
		 * causing stack recursion.
		 */
		asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER);
		break;
	default:
		BUG();
	}
	return val;
}

static __always_inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0" ::"r" (value));
		break;
	case 1:
		asm("mov %0, %%db1" ::"r" (value));
		break;
	case 2:
		asm("mov %0, %%db2" ::"r" (value));
		break;
	case 3:
		asm("mov %0, %%db3" ::"r" (value));
		break;
	case 6:
		asm("mov %0, %%db6" ::"r" (value));
		break;
	case 7:
		/*
		 * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them
		 * with other code.
		 *
		 * While it didn't happen with a DR7 write (see the DR7 read
		 * comment above which explains where it happened), add the
		 * __FORCE_ORDER here too to avoid similar problems in the
		 * future.
		 */
		asm volatile("mov %0, %%db7" ::"r" (value), __FORCE_ORDER);
		break;
	default:
		BUG();
	}
}

static inline void hw_breakpoint_disable(void)
{
	/* Zero the control register for HW Breakpoint */
	set_debugreg(0UL, 7);

	/* Zero-out the individual HW breakpoint address registers */
	set_debugreg(0UL, 0);
	set_debugreg(0UL, 1);
	set_debugreg(0UL, 2);
	set_debugreg(0UL, 3);
}

static __always_inline bool hw_breakpoint_active(void)
{
	return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
}

extern void hw_breakpoint_restore(void);

static __always_inline unsigned long local_db_save(void)
{
	unsigned long dr7;

	if (static_cpu_has(X86_FEATURE_HYPERVISOR) && !hw_breakpoint_active())
		return 0;

	get_debugreg(dr7, 7);
	dr7 &= ~0x400; /* architecturally set bit */
	if (dr7)
		set_debugreg(0, 7);
	/*
	 * Ensure the compiler doesn't lower the above statements into
	 * the critical section; disabling breakpoints late would not
	 * be good.
	 */
	barrier();

	return dr7;
}

static __always_inline void local_db_restore(unsigned long dr7)
{
	/*
	 * Ensure the compiler doesn't raise this statement into
	 * the critical section; enabling breakpoints early would
	 * not be good.
	 */
	barrier();
	if (dr7)
		set_debugreg(dr7, 7);
}

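/*
 * Typical pairing (editor's illustration, not part of the original header):
 *
 *	unsigned long dr7 = local_db_save();
 *
 *	... code that must not be interrupted by #DB ...
 *
 *	local_db_restore(dr7);
 */
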
#ifdef CONFIG_CPU_SUP_AMD
extern void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr);
extern unsigned long amd_get_dr_addr_mask(unsigned int dr);
#else
static inline void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr) { }
static inline unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
	return 0;
}
#endif

#endif /* _ASM_X86_DEBUGREG_H */
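
Per the "x86/amd: Cache debug register values in percpu variables" commit in this merge, the AMD address-mask accessors declared above are backed by per-CPU shadow copies so the mask MSR is only written when the value actually changes. A minimal sketch of that caching pattern, covering DR0 only and using a hypothetical shadow-variable and helper name (the real code handles all four mask registers):

static DEFINE_PER_CPU(unsigned long, dr0_addr_mask_shadow);	/* hypothetical name */

void amd_set_dr0_addr_mask_cached(unsigned long mask)		/* illustrative helper */
{
	/* Skip the costly WRMSR when the cached value already matches. */
	if (__this_cpu_read(dr0_addr_mask_shadow) == mask)
		return;

	wrmsrl(MSR_F16H_DR0_ADDR_MASK, mask);
	__this_cpu_write(dr0_addr_mask_shadow, mask);
}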