KVM: x86/mmu: Add sanity checks that KVM doesn't create EPT #VE SPTEs
Assert that KVM doesn't set a SPTE to a value that could trigger an EPT Violation #VE on a non-MMIO SPTE, e.g. to help detect bugs even without KVM_INTEL_PROVE_VE enabled, and to help debug actual #VE failures. Note, this will run afoul of TDX support, which needs to reflect emulated MMIO accesses into the guest as #VEs (which was the whole point of adding EPT Violation #VE support in KVM). The obvious fix for that is to exempt MMIO SPTEs, but that's annoyingly difficult now that is_mmio_spte() relies on a per-VM value. However, resolving that conundrum is a future problem, whereas getting KVM_INTEL_PROVE_VE healthy is a current problem. Signed-off-by: Sean Christopherson <seanjc@google.com> Message-ID: <20240518000430.1118488-5-seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
9031b42139
commit
837d557aba
@ -336,16 +336,19 @@ static int is_cpuid_PSE36(void)
|
||||
#ifdef CONFIG_X86_64
|
||||
/*
 * Install a SPTE with a single 64-bit store (64-bit hosts only, this sits
 * under #ifdef CONFIG_X86_64).  WRITE_ONCE() keeps the compiler from
 * tearing or duplicating the store.
 *
 * Assert that the new value can't trigger an unexpected EPT Violation #VE,
 * i.e. that KVM never writes a non-present SPTE that lacks the "suppress
 * #VE" bit when hardware #VE is possible (see is_ept_ve_possible()).
 */
static void __set_spte(u64 *sptep, u64 spte)
{
	KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
	WRITE_ONCE(*sptep, spte);
}
|
||||
|
||||
/*
 * Update a SPTE with a plain 64-bit store.  "Fast" variant: unlike
 * __update_clear_spte_slow(), the old value is not needed by the caller,
 * so a WRITE_ONCE() suffices and no atomic exchange is required.
 *
 * As with all SPTE writers, assert the new value can't cause an
 * unexpected EPT Violation #VE.
 */
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
	WRITE_ONCE(*sptep, spte);
}
|
||||
|
||||
/*
 * Update a SPTE and return the previous value.  Uses xchg() so that
 * reading the old value and installing the new one is a single atomic
 * operation, i.e. no concurrent writer can slip in between.
 *
 * Assert the new value can't cause an unexpected EPT Violation #VE.
 */
static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	KVM_MMU_WARN_ON(is_ept_ve_possible(spte));
	return xchg(sptep, spte);
}
|
||||
|
||||
|
@ -3,6 +3,8 @@
|
||||
#ifndef KVM_X86_MMU_SPTE_H
|
||||
#define KVM_X86_MMU_SPTE_H
|
||||
|
||||
#include <asm/vmx.h>
|
||||
|
||||
#include "mmu.h"
|
||||
#include "mmu_internal.h"
|
||||
|
||||
@ -276,6 +278,13 @@ static inline bool is_shadow_present_pte(u64 pte)
|
||||
return !!(pte & SPTE_MMU_PRESENT_MASK);
|
||||
}
|
||||
|
||||
/*
 * Returns true if writing this SPTE could result in an EPT Violation #VE
 * being reflected into the guest: hardware #VE must be in play (KVM is
 * setting the "suppress #VE" bit on present SPTEs), the SPTE must lack
 * the suppress bit, and the SPTE must not hold the WX misconfig value
 * (which triggers an EPT misconfiguration exit, not a #VE).
 */
static inline bool is_ept_ve_possible(u64 spte)
{
	/* #VE is impossible if KVM never sets the suppress bit at all. */
	if (!(shadow_present_mask & VMX_EPT_SUPPRESS_VE_BIT))
		return false;

	/* A SPTE with the suppress bit set never generates #VE. */
	if (spte & VMX_EPT_SUPPRESS_VE_BIT)
		return false;

	return (spte & VMX_EPT_RWX_MASK) != VMX_EPT_MISCONFIG_WX_VALUE;
}
|
||||
|
||||
/*
|
||||
* Returns true if A/D bits are supported in hardware and are enabled by KVM.
|
||||
* When enabled, KVM uses A/D bits for all non-nested MMUs. Because L1 can
|
||||
|
@ -21,11 +21,13 @@ static inline u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep)
|
||||
|
||||
/*
 * Atomically replace a TDP MMU SPTE and return the old value.  The xchg()
 * makes the read of the old SPTE and the write of the new one a single
 * atomic operation; the SPTE pointer is RCU-protected, hence the
 * rcu_dereference().
 *
 * Assert the new value can't cause an unexpected EPT Violation #VE.
 */
static inline u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte)
{
	KVM_MMU_WARN_ON(is_ept_ve_possible(new_spte));
	return xchg(rcu_dereference(sptep), new_spte);
}
|
||||
|
||||
/*
 * Write a TDP MMU SPTE non-atomically; the caller is responsible for
 * ensuring no other writer can race.  WRITE_ONCE() prevents store tearing
 * for lockless readers; the SPTE pointer is RCU-protected, hence the
 * rcu_dereference().
 *
 * Assert the new value can't cause an unexpected EPT Violation #VE.
 */
static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
{
	KVM_MMU_WARN_ON(is_ept_ve_possible(new_spte));
	WRITE_ONCE(*rcu_dereference(sptep), new_spte);
}
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user