KVM: x86/mmu: WARN on NULL pae_root or lm_root, or bad shadow root level
WARN if KVM is about to dereference a NULL pae_root or lm_root when
loading an MMU, and convert the BUG() on a bad shadow_root_level into a
WARN (now that errors are handled cleanly).

With nested NPT, botching the level and sending KVM down the wrong path
is all too easy, and the on-demand allocation of pae_root and lm_root
means bugs crash the host.  Obviously, KVM could unconditionally
allocate the roots, but that's arguably a worse failure mode as it
would potentially corrupt the guest instead of crashing it.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210305011101.3597423-18-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
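For illustration only, the standalone sketch below mimics the pattern this patch moves to: on an impossible state, warn once and return -EIO so the caller can fail the operation, instead of BUG()ing and taking the host down. It is not kernel code; the struct, macro, constant, and function names are invented for the example.

/*
 * Standalone sketch, not kernel code: every name below is illustrative.
 * It shows the "warn once and return -EIO" pattern instead of BUG().
 */
#include <errno.h>
#include <stdio.h>

struct fake_mmu {
	unsigned long *pae_root;	/* allocated on demand, may be NULL */
	int shadow_root_level;
};

/* Rough stand-in for WARN_ON_ONCE()/WARN_ONCE(): print at most once per site. */
#define warn_once(fmt, ...)						\
	do {								\
		static int warned;					\
		if (!warned) {						\
			warned = 1;					\
			fprintf(stderr, "WARN: " fmt "\n", ##__VA_ARGS__); \
		}							\
	} while (0)

#define FAKE_PT32E_ROOT_LEVEL	3	/* illustrative constant, not the kernel's */

static int fake_alloc_direct_roots(struct fake_mmu *mmu)
{
	if (mmu->shadow_root_level == FAKE_PT32E_ROOT_LEVEL) {
		if (!mmu->pae_root) {
			warn_once("pae_root not allocated");
			return -EIO;	/* fail the vCPU, don't crash the host */
		}
		/* ... install the four PAE roots here ... */
		return 0;
	}

	warn_once("Bad TDP root level = %d", mmu->shadow_root_level);
	return -EIO;
}

int main(void)
{
	struct fake_mmu mmu = { .pae_root = NULL,
				.shadow_root_level = FAKE_PT32E_ROOT_LEVEL };

	if (fake_alloc_direct_roots(&mmu))
		fprintf(stderr, "root allocation failed; report an internal error instead of dying\n");
	return 0;
}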
commit 73ad160693
parent a91f387b4b
@@ -3255,6 +3255,9 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
 		mmu->root_hpa = root;
 	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
+		if (WARN_ON_ONCE(!mmu->pae_root))
+			return -EIO;
+
 		for (i = 0; i < 4; ++i) {
 			WARN_ON_ONCE(mmu->pae_root[i] &&
 				     VALID_PAGE(mmu->pae_root[i]));
@@ -3265,8 +3268,10 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 					   shadow_me_mask;
 		}
 		mmu->root_hpa = __pa(mmu->pae_root);
-	} else
-		BUG();
+	} else {
+		WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
+		return -EIO;
+	}
 
 	/* root_pgd is ignored for direct MMUs. */
 	mmu->root_pgd = 0;
@@ -3310,6 +3315,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		goto set_root_pgd;
 	}
 
+	if (WARN_ON_ONCE(!mmu->pae_root))
+		return -EIO;
+
 	/*
 	 * We shadow a 32 bit page table. This may be a legacy 2-level
 	 * or a PAE 3-level page table. In either case we need to be aware that
@@ -3319,6 +3327,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
 
+		if (WARN_ON_ONCE(!mmu->lm_root))
+			return -EIO;
+
 		mmu->lm_root[0] = __pa(mmu->pae_root) | pm_mask;
 	}
 
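The conversion away from BUG() only pays off because the callers of these root-allocation paths check the return value (the "now that errors are handled cleanly" in the commit message refers to that earlier plumbing). The sketch below is a simplified, hypothetical caller with invented names; it does not reproduce the kernel's kvm_mmu_load(), it only shows the -EIO being propagated rather than crashing.

/*
 * Hypothetical caller sketch -- names invented, control flow heavily
 * simplified; the point is only that the error is propagated.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Pretend root allocator: fails with -EIO when its preallocated root is missing. */
static int fake_alloc_roots(bool have_pae_root)
{
	return have_pae_root ? 0 : -EIO;
}

static int fake_mmu_load(bool have_pae_root)
{
	int r = fake_alloc_roots(have_pae_root);

	if (r)
		return r;	/* propagate: the run loop can surface an internal error */

	/* ... sync roots and switch to the new root here ... */
	return 0;
}

int main(void)
{
	if (fake_mmu_load(false) == -EIO)
		printf("MMU load failed with -EIO; the guest is killed, the host survives\n");
	return 0;
}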