KVM: x86: simplify ept_misconfig
commit e08d26f0712532c79b5ba6200862eaf2036f8df6 upstream.

Calling handle_mmio_page_fault() has been unnecessary since commit
e9ee956e311d ("KVM: x86: MMU: Move handle_mmio_page_fault() call to
kvm_mmu_page_fault()", 2016-02-22).

handle_mmio_page_fault() can now be made static.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
[bwh: Backported to 4.9: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
042a4417d1
commit
43a39a3e9b
@ -3383,7 +3383,23 @@ exit:
|
||||
return reserved;
|
||||
}
|
||||
|
||||
int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
|
||||
/*
|
||||
* Return values of handle_mmio_page_fault:
|
||||
* RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
|
||||
* directly.
|
||||
* RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
|
||||
* fault path update the mmio spte.
|
||||
* RET_MMIO_PF_RETRY: let CPU fault again on the address.
|
||||
* RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
|
||||
*/
|
||||
enum {
|
||||
RET_MMIO_PF_EMULATE = 1,
|
||||
RET_MMIO_PF_INVALID = 2,
|
||||
RET_MMIO_PF_RETRY = 0,
|
||||
RET_MMIO_PF_BUG = -1
|
||||
};
|
||||
|
||||
static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
|
||||
{
|
||||
u64 spte;
|
||||
bool reserved;
|
||||
@ -4520,6 +4536,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
|
||||
return 1;
|
||||
if (r < 0)
|
||||
return r;
|
||||
/* Must be RET_MMIO_PF_INVALID. */
|
||||
}
|
||||
|
||||
r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
|
||||
|
@ -56,23 +56,6 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
|
||||
void
|
||||
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
|
||||
|
||||
/*
|
||||
* Return values of handle_mmio_page_fault:
|
||||
* RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
|
||||
* directly.
|
||||
* RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
|
||||
* fault path update the mmio spte.
|
||||
* RET_MMIO_PF_RETRY: let CPU fault again on the address.
|
||||
* RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
|
||||
*/
|
||||
enum {
|
||||
RET_MMIO_PF_EMULATE = 1,
|
||||
RET_MMIO_PF_INVALID = 2,
|
||||
RET_MMIO_PF_RETRY = 0,
|
||||
RET_MMIO_PF_BUG = -1
|
||||
};
|
||||
|
||||
int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
|
||||
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
|
||||
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
|
||||
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
|
||||
|
@ -6556,16 +6556,9 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
|
||||
NULL, 0) == EMULATE_DONE;
|
||||
}
|
||||
|
||||
ret = handle_mmio_page_fault(vcpu, gpa, true);
|
||||
if (likely(ret == RET_MMIO_PF_EMULATE))
|
||||
return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
|
||||
EMULATE_DONE;
|
||||
|
||||
if (unlikely(ret == RET_MMIO_PF_INVALID))
|
||||
return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0);
|
||||
|
||||
if (unlikely(ret == RET_MMIO_PF_RETRY))
|
||||
return 1;
|
||||
ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
|
||||
if (ret >= 0)
|
||||
return ret;
|
||||
|
||||
/* It is the real ept misconfig */
|
||||
WARN_ON(1);
|
||||
|
Loading…
x
Reference in New Issue
Block a user