KVM: x86/mmu: Handle no-slot faults in kvm_faultin_pfn()
Handle faults on GFNs that do not have a backing memslot in
kvm_faultin_pfn() and drop handle_abnormal_pfn(). This eliminates
duplicate code in the various page fault handlers.

Opportunistically tweak the comment about handling gfn > host.MAXPHYADDR
to reflect that the effect of returning RET_PF_EMULATE at that point is
to avoid creating an MMIO SPTE for such GFNs.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220921173546.2674386-7-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 897e4526e5
commit f09948ec1f
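For orientation, below is a minimal, self-contained C sketch of the pfn-handling order this patch establishes inside kvm_faultin_pfn(). Everything in it is a stand-in: the struct, the helper names, and the simplified return values only illustrate the flow of the real code in the diff that follows.

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

enum sketch_ret { RET_PF_CONTINUE = 0, RET_PF_EMULATE };

struct fault_sketch {
	bool error_pfn;             /* stand-in for is_error_pfn(fault->pfn) */
	void *slot;                 /* stand-in for fault->slot */
	bool mmio_caching;          /* stand-in for enable_mmio_caching */
	unsigned long long gfn;
	unsigned long long max_gfn; /* stand-in for kvm_mmu_max_gfn() */
};

/* Plays the role of kvm_handle_noslot_fault(): emulate immediately when an
 * MMIO SPTE would be a nop (caching disabled) or unrepresentable (gfn above
 * host.MAXPHYADDR); otherwise continue and let the fault handler install an
 * MMIO SPTE. */
static int handle_noslot(const struct fault_sketch *f)
{
	if (!f->mmio_caching || f->gfn > f->max_gfn)
		return RET_PF_EMULATE;
	return RET_PF_CONTINUE;
}

/* Plays the role of the tail of kvm_faultin_pfn() after this patch: error
 * pfns first, then faults with no backing memslot, then the common path
 * shared by all page fault handlers. */
static int faultin_pfn(const struct fault_sketch *f, unsigned int access)
{
	(void)access;                    /* the real code forwards this on */
	if (f->error_pfn)
		return -EFAULT;          /* kvm_handle_error_pfn() */
	if (f->slot == NULL)
		return handle_noslot(f); /* kvm_handle_noslot_fault() */
	return RET_PF_CONTINUE;
}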
arch/x86/kvm/mmu/mmu.c
@@ -3218,28 +3218,32 @@ static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fa
 	return -EFAULT;
 }
 
-static int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
-			       unsigned int access)
+static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu,
+				   struct kvm_page_fault *fault,
+				   unsigned int access)
 {
-	if (unlikely(!fault->slot)) {
-		gva_t gva = fault->is_tdp ? 0 : fault->addr;
+	gva_t gva = fault->is_tdp ? 0 : fault->addr;
 
-		vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
-				     access & shadow_mmio_access_mask);
-		/*
-		 * If MMIO caching is disabled, emulate immediately without
-		 * touching the shadow page tables as attempting to install an
-		 * MMIO SPTE will just be an expensive nop.  Do not cache MMIO
-		 * whose gfn is greater than host.MAXPHYADDR, any guest that
-		 * generates such gfns is running nested and is being tricked
-		 * by L0 userspace (you can observe gfn > L1.MAXPHYADDR if
-		 * and only if L1's MAXPHYADDR is inaccurate with respect to
-		 * the hardware's).
-		 */
-		if (unlikely(!enable_mmio_caching) ||
-		    unlikely(fault->gfn > kvm_mmu_max_gfn()))
-			return RET_PF_EMULATE;
-	}
+	vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
+			     access & shadow_mmio_access_mask);
+
+	/*
+	 * If MMIO caching is disabled, emulate immediately without
+	 * touching the shadow page tables as attempting to install an
+	 * MMIO SPTE will just be an expensive nop.
+	 */
+	if (unlikely(!enable_mmio_caching))
+		return RET_PF_EMULATE;
+
+	/*
+	 * Do not create an MMIO SPTE for a gfn greater than host.MAXPHYADDR,
+	 * any guest that generates such gfns is running nested and is being
+	 * tricked by L0 userspace (you can observe gfn > L1.MAXPHYADDR if and
+	 * only if L1's MAXPHYADDR is inaccurate with respect to the
+	 * hardware's).
+	 */
+	if (unlikely(fault->gfn > kvm_mmu_max_gfn()))
+		return RET_PF_EMULATE;
 
 	return RET_PF_CONTINUE;
 }
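To make the MAXPHYADDR check above concrete, here is a small hypothetical example, assuming the usual (1 << (MAXPHYADDR - PAGE_SHIFT)) - 1 derivation of the maximum gfn; the bit widths are illustrative only, not taken from any particular machine.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* Assume hardware with MAXPHYADDR = 46 bits. */
	uint64_t max_gfn = (1ULL << (46 - PAGE_SHIFT)) - 1;

	/* If L0 userspace tells L1 that MAXPHYADDR is 52 bits, L1 may let
	 * L2 touch a gpa near that limit, producing a gfn that cannot be
	 * encoded in an MMIO SPTE. */
	uint64_t gfn = (1ULL << (52 - PAGE_SHIFT)) - 1;

	printf("max_gfn=%#llx gfn=%#llx -> %s\n",
	       (unsigned long long)max_gfn, (unsigned long long)gfn,
	       gfn > max_gfn ? "RET_PF_EMULATE" : "RET_PF_CONTINUE");
	return 0;
}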
@@ -4248,7 +4252,8 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	return RET_PF_CONTINUE;
 }
 
-static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+			   unsigned int access)
 {
 	int ret;
 
@@ -4262,6 +4267,9 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	if (unlikely(is_error_pfn(fault->pfn)))
 		return kvm_handle_error_pfn(vcpu, fault);
 
+	if (unlikely(!fault->slot))
+		return kvm_handle_noslot_fault(vcpu, fault, access);
+
 	return RET_PF_CONTINUE;
 }
 
@@ -4312,11 +4320,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	if (r)
 		return r;
 
-	r = kvm_faultin_pfn(vcpu, fault);
-	if (r != RET_PF_CONTINUE)
-		return r;
-
-	r = handle_abnormal_pfn(vcpu, fault, ACC_ALL);
+	r = kvm_faultin_pfn(vcpu, fault, ACC_ALL);
 	if (r != RET_PF_CONTINUE)
 		return r;
 
arch/x86/kvm/mmu/paging_tmpl.h
@@ -837,11 +837,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	else
 		fault->max_level = walker.level;
 
-	r = kvm_faultin_pfn(vcpu, fault);
-	if (r != RET_PF_CONTINUE)
-		return r;
-
-	r = handle_abnormal_pfn(vcpu, fault, walker.pte_access);
+	r = kvm_faultin_pfn(vcpu, fault, walker.pte_access);
 	if (r != RET_PF_CONTINUE)
 		return r;
 