x86/srso: Add SRSO_NO support
Upstream commit: 1b5277c0ea0b247393a9c426769fde18cff5e2f6

Add support for the CPUID flag which denotes that the CPU is not
affected by SRSO.

Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent df76a59feb
commit e47af0c255
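For context: feature word 20 in cpufeatures.h is the AMD extended feature leaf CPUID 0x80000021 (EAX), so the new X86_FEATURE_SRSO_NO at (20*32+29) is bit 29 of that leaf, and the kvm/cpuid.c hunk below passes it through to guests. As a hypothetical illustration (not part of this commit), a userspace or guest probe for the bit could look like:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid_count() returns 0 if the leaf is out of range. */
	if (__get_cpuid_count(0x80000021, 0, &eax, &ebx, &ecx, &edx))
		printf("SRSO_NO: %s\n", (eax & (1u << 29)) ? "yes" : "no");
	else
		printf("CPUID leaf 0x80000021 not available\n");
	return 0;
}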
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -402,7 +402,9 @@
 #define X86_FEATURE_SEV_ES		(19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
 #define X86_FEATURE_SME_COHERENT	(19*32+10) /* "" AMD hardware-enforced cache coherency */
 
+#define X86_FEATURE_SBPB		(20*32+27) /* "" Selective Branch Prediction Barrier */
 #define X86_FEATURE_IBPB_BRTYPE		(20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+#define X86_FEATURE_SRSO_NO		(20*32+29) /* "" CPU is not affected by SRSO */
 
 /*
  * BUG word(s)
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -60,6 +60,7 @@
 
 #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB			BIT(0)	   /* Indirect Branch Prediction Barrier */
+#define PRED_CMD_SBPB			BIT(7)	   /* Selective Branch Prediction Barrier */
 
 #define MSR_PPIN_CTL			0x0000004e
 #define MSR_PPIN			0x0000004f
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -307,11 +307,11 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
 		: "memory");
 }
 
+extern u64 x86_pred_cmd;
+
 static inline void indirect_branch_prediction_barrier(void)
 {
-	u64 val = PRED_CMD_IBPB;
-
-	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
+	alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
 }
 
 /* The Intel SPEC CTRL MSR base value cache */
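What this hunk changes: the IBPB helper no longer hardcodes PRED_CMD_IBPB but writes whatever the mitigation code left in x86_pred_cmd, so SRSO_NO parts can be downgraded to the cheaper SBPB without touching every call site. A minimal standalone sketch of that indirection, with the MSR write stubbed out by a printf (illustrative model, not kernel API):

#include <stdint.h>
#include <stdio.h>

#define PRED_CMD_IBPB (1ull << 0)
#define PRED_CMD_SBPB (1ull << 7)

/* Boot-time default, as in the bugs.c hunk below. */
static uint64_t x86_pred_cmd = PRED_CMD_IBPB;

/* Stand-in for alternative_msr_write(MSR_IA32_PRED_CMD, ...). */
static void write_pred_cmd(uint64_t val)
{
	printf("MSR_IA32_PRED_CMD <- 0x%02llx\n", (unsigned long long)val);
}

/* What the patched indirect_branch_prediction_barrier() boils down to. */
static void indirect_branch_prediction_barrier(void)
{
	write_pred_cmd(x86_pred_cmd);
}

int main(void)
{
	indirect_branch_prediction_barrier();	/* 0x01: full IBPB */

	/* srso_select_mitigation() does this on SRSO_NO parts (or with
	 * srso=off): all later barriers become the selective SBPB. */
	x86_pred_cmd = PRED_CMD_SBPB;
	indirect_branch_prediction_barrier();	/* 0x80: selective barrier */
	return 0;
}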
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1286,14 +1286,14 @@ bool cpu_has_ibpb_brtype_microcode(void)
 {
 	u8 fam = boot_cpu_data.x86;
 
-	if (fam == 0x17) {
-		/* Zen1/2 IBPB flushes branch type predictions too. */
+	/* Zen1/2 IBPB flushes branch type predictions too. */
+	if (fam == 0x17)
 		return boot_cpu_has(X86_FEATURE_AMD_IBPB);
-	} else if (fam == 0x19) {
+	/* Poke the MSR bit on Zen3/4 to check its presence. */
+	else if (fam == 0x19)
+		return !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB);
+	else
 		return false;
-	}
-
-	return false;
 }
 
 static void zenbleed_check_cpu(void *unused)
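The rewritten Zen3/4 branch relies on a common kernel idiom: wrmsrl_safe() returns 0 on success and a nonzero error if the WRMSR faults, so trying to set the SBPB bit doubles as a presence probe for it. A standalone model of the idiom, with the fault behavior simulated (wrmsrl_safe_model and cpu_accepts_sbpb are invented names for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_PRED_CMD 0x49
#define PRED_CMD_SBPB     (1ull << 7)

/* Stand-in for microcode state: does this CPU accept the SBPB bit? */
static bool cpu_accepts_sbpb;

/* Model of wrmsrl_safe(): 0 on success, negative error if the write
 * would fault (here: an unrecognized command bit). */
static int wrmsrl_safe_model(uint32_t msr, uint64_t val)
{
	if (msr == MSR_IA32_PRED_CMD && (val & PRED_CMD_SBPB) && !cpu_accepts_sbpb)
		return -5; /* faulting write */
	return 0;
}

int main(void)
{
	cpu_accepts_sbpb = true;
	/* !wrmsrl_safe(...) == 1 means the bit exists. */
	printf("SBPB present: %d\n", !wrmsrl_safe_model(MSR_IA32_PRED_CMD, PRED_CMD_SBPB));

	cpu_accepts_sbpb = false;
	printf("SBPB present: %d\n", !wrmsrl_safe_model(MSR_IA32_PRED_CMD, PRED_CMD_SBPB));
	return 0;
}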
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -56,6 +56,9 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
 
+u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+EXPORT_SYMBOL_GPL(x86_pred_cmd);
+
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
@@ -2284,7 +2287,7 @@ static void __init srso_select_mitigation(void)
 	bool has_microcode;
 
 	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
-		return;
+		goto pred_cmd;
 
 	/*
 	 * The first check is for the kernel running as a guest in order
@@ -2297,9 +2300,18 @@ static void __init srso_select_mitigation(void)
 	} else {
 		/*
 		 * Enable the synthetic (even if in a real CPUID leaf)
-		 * flag for guests.
+		 * flags for guests.
 		 */
 		setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
+		setup_force_cpu_cap(X86_FEATURE_SBPB);
+
+		/*
+		 * Zen1/2 with SMT off aren't vulnerable after the right
+		 * IBPB microcode has been applied.
+		 */
+		if ((boot_cpu_data.x86 < 0x19) &&
+		    (cpu_smt_control == CPU_SMT_DISABLED))
+			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
 	}
 
 	switch (srso_cmd) {
@@ -2322,16 +2334,20 @@ static void __init srso_select_mitigation(void)
 			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
 		} else {
 			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
-			return;
+			goto pred_cmd;
 		}
 		break;
 
 	default:
 		break;
 	}
 
 	pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
 
+pred_cmd:
+	if (boot_cpu_has(X86_FEATURE_SRSO_NO) ||
+	    srso_cmd == SRSO_CMD_OFF)
+		x86_pred_cmd = PRED_CMD_SBPB;
 }
 
 #undef pr_fmt
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1300,8 +1300,10 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	    boot_cpu_has(X86_FEATURE_AVX))
 		setup_force_cpu_bug(X86_BUG_GDS);
 
-	if (cpu_matches(cpu_vuln_blacklist, SRSO))
-		setup_force_cpu_bug(X86_BUG_SRSO);
+	if (!cpu_has(c, X86_FEATURE_SRSO_NO)) {
+		if (cpu_matches(cpu_vuln_blacklist, SRSO))
+			setup_force_cpu_bug(X86_BUG_SRSO);
+	}
 
 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 		return;
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -491,6 +491,9 @@ void kvm_set_cpu_caps(void)
 	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
 		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
 
+	if (cpu_feature_enabled(X86_FEATURE_SRSO_NO))
+		kvm_cpu_cap_set(X86_FEATURE_SRSO_NO);
+
 	/*
 	 * Hide all SVM features by default, SVM will set the cap bits for
 	 * features it emulates and/or exposes for L1.