KVM: x86/mmu: Use dummy root, backed by zero page, for !visible guest roots
When attempting to allocate a shadow root for a !visible guest root gfn, e.g. one that resides in MMIO space, load a dummy root that is backed by the zero page instead of immediately synthesizing a triple fault shutdown (using the zero page ensures any attempt to translate memory will generate a !PRESENT fault and thus VM-Exit).

Unless the vCPU is racing with memslot activity, KVM will inject a page fault due to not finding a visible slot in FNAME(walk_addr_generic), i.e. the end result is mostly the same, but critically KVM will inject a fault only *after* running the vCPU with the bogus root.

Waiting to inject a fault until after running the vCPU fixes a bug where KVM would bail from nested VM-Enter if L1 tried to run L2 with TDP enabled and a !visible root.  Even though a bad root will *probably* lead to shutdown, (a) it's not guaranteed and (b) the CPU won't read the underlying memory until after VM-Enter succeeds.  E.g. if L1 runs L2 with a VMX preemption timer value of '0', then architecturally the preemption timer VM-Exit is guaranteed to occur before the CPU executes any instruction, i.e. before the CPU needs to translate a GPA to an HPA (so long as there are no injected events with higher priority than the preemption timer).

If KVM manages to get to FNAME(fetch) with a dummy root, e.g. because userspace created a memslot between installing the dummy root and handling the page fault, simply unload the MMU to allocate a new root and retry the instruction.  Use KVM_REQ_MMU_FREE_OBSOLETE_ROOTS to drop the root, as invoking kvm_mmu_free_roots() while holding mmu_lock would deadlock, and conceptually the dummy root has indeed become obsolete.  The only difference versus existing usage of KVM_REQ_MMU_FREE_OBSOLETE_ROOTS is that the root has become obsolete due to memslot *creation*, not memslot deletion or movement.

Reported-by: Reima Ishii <ishiir@g.ecc.u-tokyo.ac.jp>
Cc: Yu Zhang <yu.c.zhang@linux.intel.com>
Link: https://lore.kernel.org/r/20230729005200.1057358-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 0e3223d8d0
parent b5b359ac30
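The zero-page trick described in the message above is easy to demonstrate outside the kernel: a 4KiB page of zeroes, viewed as a page table, holds 512 entries whose Present bit (bit 0 on x86) is clear, so any hardware walk through it must fault.  A minimal standalone C sketch, illustrative only (the constants are x86 paging conventions, not KVM code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PTES_PER_PAGE	512		/* 4096 bytes / 8 bytes per PTE */
#define PTE_PRESENT	(1ull << 0)	/* x86 Present bit */

int main(void)
{
	uint64_t zero_page[PTES_PER_PAGE];
	int i, present = 0;

	/* Stand-in for the kernel's shared zero page. */
	memset(zero_page, 0, sizeof(zero_page));

	for (i = 0; i < PTES_PER_PAGE; i++)
		present += !!(zero_page[i] & PTE_PRESENT);

	/* Always prints 0: every translation through a zero page faults. */
	printf("present PTEs in the zero page: %d\n", present);
	return 0;
}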
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3589,7 +3589,9 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
 					   &invalid_list);
 
 	if (free_active_root) {
-		if (root_to_sp(mmu->root.hpa)) {
+		if (kvm_mmu_is_dummy_root(mmu->root.hpa)) {
+			/* Nothing to cleanup for dummy roots. */
+		} else if (root_to_sp(mmu->root.hpa)) {
 			mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
 		} else if (mmu->pae_root) {
 			for (i = 0; i < 4; ++i) {
@@ -3637,19 +3639,6 @@ void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
 
-
-static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
-{
-	int ret = 0;
-
-	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
-		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
-		ret = 1;
-	}
-
-	return ret;
-}
-
 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
 			    u8 level)
 {
@@ -3787,8 +3776,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
 	root_gfn = root_pgd >> PAGE_SHIFT;
 
-	if (mmu_check_root(vcpu, root_gfn))
-		return 1;
+	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
+		mmu->root.hpa = kvm_mmu_get_dummy_root();
+		return 0;
+	}
 
 	/*
 	 * On SVM, reading PDPTRs might access guest memory, which might fault
@@ -3800,8 +3791,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 			if (!(pdptrs[i] & PT_PRESENT_MASK))
 				continue;
 
-			if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
-				return 1;
+			if (!kvm_vcpu_is_visible_gfn(vcpu, pdptrs[i] >> PAGE_SHIFT))
+				pdptrs[i] = 0;
 		}
 	}
 
@@ -3968,7 +3959,7 @@ static bool is_unsync_root(hpa_t root)
 {
 	struct kvm_mmu_page *sp;
 
-	if (!VALID_PAGE(root))
+	if (!VALID_PAGE(root) || kvm_mmu_is_dummy_root(root))
 		return false;
 
 	/*
@@ -4374,6 +4365,10 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	int r;
 
+	/* Dummy roots are used only for shadowing bad guest roots. */
+	if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa)))
+		return RET_PF_RETRY;
+
 	if (page_fault_handle_page_track(vcpu, fault))
 		return RET_PF_EMULATE;
 
@@ -4609,9 +4604,8 @@ static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
 			    gpa_t new_pgd, union kvm_mmu_page_role new_role)
 {
 	/*
-	 * For now, limit the caching to 64-bit hosts+VMs in order to avoid
-	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
-	 * later if necessary.
+	 * Limit reuse to 64-bit hosts+VMs without "special" roots in order to
+	 * avoid having to deal with PDPTEs and other complexities.
 	 */
 	if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa))
 		kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
@@ -5510,14 +5504,19 @@ static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
 
 	/*
 	 * When freeing obsolete roots, treat roots as obsolete if they don't
-	 * have an associated shadow page.  This does mean KVM will get false
+	 * have an associated shadow page, as it's impossible to determine if
+	 * such roots are fresh or stale.  This does mean KVM will get false
 	 * positives and free roots that don't strictly need to be freed, but
 	 * such false positives are relatively rare:
 	 *
-	 * (a) only PAE paging and nested NPT has roots without shadow pages
+	 * (a) only PAE paging and nested NPT have roots without shadow pages
+	 *     (or any shadow paging flavor with a dummy root, see note below)
 	 * (b) remote reloads due to a memslot update obsoletes _all_ roots
 	 * (c) KVM doesn't track previous roots for PAE paging, and the guest
 	 *     is unlikely to zap an in-use PGD.
+	 *
+	 * Note!  Dummy roots are unique in that they are obsoleted by memslot
+	 * _creation_!  See also FNAME(fetch).
	 */
 	sp = root_to_sp(root_hpa);
 	return !sp || is_obsolete_sp(kvm, sp);
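To make the note about memslot _creation_ concrete: a dummy root has no associated shadow page, and root_to_sp() returns NULL for it (see the spte.h hunk below), so is_obsolete_root() unconditionally reports it obsolete.  A hypothetical debug assertion, not part of this commit, that captures the invariant:

/*
 * Hypothetical sanity check (illustrative only, not in the patch): the
 * "!sp" arm of is_obsolete_root() always fires for the zero-page root,
 * which is what lets KVM_REQ_MMU_FREE_OBSOLETE_ROOTS drop a dummy root
 * after userspace creates a memslot.
 */
static void assert_dummy_root_is_obsolete(struct kvm *kvm)
{
	WARN_ON_ONCE(!is_obsolete_root(kvm, kvm_mmu_get_dummy_root()));
}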
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -36,6 +36,16 @@
 #define INVALID_PAE_ROOT	0
 #define IS_VALID_PAE_ROOT(x)	(!!(x))
 
+static inline hpa_t kvm_mmu_get_dummy_root(void)
+{
+	return my_zero_pfn(0) << PAGE_SHIFT;
+}
+
+static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
+{
+	return is_zero_pfn(shadow_page >> PAGE_SHIFT);
+}
+
 typedef u64 __rcu *tdp_ptep_t;
 
 struct kvm_mmu_page {
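The helper pair above round-trips by construction: my_zero_pfn() and is_zero_pfn() (from linux/pgtable.h) bracket the same pfn, and because the zero page is a real physical page, the dummy root passes VALID_PAGE() and can be loaded into hardware even though every entry it "contains" is !PRESENT.  A hypothetical self-check, not part of this commit, spelling out the invariants:

/*
 * Hypothetical invariants check (illustrative only): the dummy root must
 * be recognized as dummy, must look like a valid HPA so it can be loaded,
 * and must never be mistaken for a real shadow page.
 */
static inline void check_dummy_root_invariants(void)
{
	hpa_t dummy = kvm_mmu_get_dummy_root();

	WARN_ON_ONCE(!kvm_mmu_is_dummy_root(dummy));
	WARN_ON_ONCE(!VALID_PAGE(dummy));
	WARN_ON_ONCE(root_to_sp(dummy));	/* see the spte.h hunk below */
}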
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -651,6 +651,17 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 	if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
 		goto out_gpte_changed;
 
+	/*
+	 * Load a new root and retry the faulting instruction in the extremely
+	 * unlikely scenario that the guest root gfn became visible between
+	 * loading a dummy root and handling the resulting page fault, e.g. if
+	 * userspace creates a memslot in the interim.
+	 */
+	if (unlikely(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa))) {
+		kvm_make_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu);
+		goto out_gpte_changed;
+	}
+
 	for_each_shadow_entry(vcpu, fault->addr, it) {
 		gfn_t table_gfn;
 
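For context on how the requested free is consumed (paraphrased from vcpu_enter_guest() in arch/x86/kvm/x86.c; not part of this diff): the request is serviced before the next VM-Enter, dropping the now-obsolete dummy root so that the subsequent MMU reload allocates a real root for the retried instruction.  Requesting the free, rather than calling kvm_mmu_free_roots() directly, avoids the mmu_lock deadlock called out in the commit message, since FNAME(fetch) runs with mmu_lock held.

/* Paraphrased from vcpu_enter_guest(); shown for context only. */
if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
	kvm_mmu_free_obsolete_roots(vcpu);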
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -238,6 +238,9 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
 
 static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
 {
+	if (kvm_mmu_is_dummy_root(root))
+		return NULL;
+
 	/*
 	 * The "root" may be a special root, e.g. a PAE entry, treat it as a
 	 * SPTE to ensure any non-PA bits are dropped.