KVM: X86: Add parameter struct kvm_mmu *mmu into mmu->gva_to_gpa()
The mmu->gva_to_gpa() callback does not take a "struct kvm_mmu *mmu" parameter, so an extra FNAME(gva_to_gpa_nested) is needed. Adding the parameter simplifies the code, and it makes it explicit in translate_nested_gpa() that the walk is done with vcpu->arch.walk_mmu for a GVA and with vcpu->arch.mmu for an L2 GPA.

Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Message-Id: <20211124122055.64424-3-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 1f5a21ee84
parent b46a13cb7e
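For orientation before the diff: the sketch below is a minimal, self-contained C model of the calling-convention change, using simplified stand-in types (not the real KVM structures or field layout). It shows the new callback shape — the caller picks the mmu once and passes it through to gva_to_gpa() — which is why the *_nested twin handlers can be dropped.

/*
 * Standalone sketch, not kernel code. Types and names are simplified
 * stand-ins for the KVM structures touched by this commit.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpa_t;

struct x86_exception { uint32_t error_code; };
struct kvm_vcpu;
struct kvm_mmu;

struct kvm_mmu {
	/* New shape: the mmu being walked is passed explicitly. */
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gpa_t gva_or_gpa, uint32_t access,
			    struct x86_exception *exception);
};

struct kvm_vcpu {
	struct kvm_mmu root_mmu;
	struct kvm_mmu nested_mmu;
	struct kvm_mmu *mmu;      /* walked for L2 GPA -> L1 GPA */
	struct kvm_mmu *walk_mmu; /* walked for GVAs of the running guest */
};

/* One handler serves any mmu; no *_nested twin is needed. */
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  gpa_t vaddr, uint32_t access,
				  struct x86_exception *exception)
{
	(void)vcpu; (void)mmu; (void)access;
	if (exception)
		exception->error_code = 0;
	return vaddr; /* identity translation in this toy model */
}

/* Caller pattern after the change: resolve the mmu once, pass it through. */
static gpa_t demo_gva_to_gpa_read(struct kvm_vcpu *vcpu, gpa_t gva,
				  struct x86_exception *exception)
{
	struct kvm_mmu *mmu = vcpu->walk_mmu;

	return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception);
}

int main(void)
{
	struct kvm_vcpu vcpu = { 0 };

	vcpu.root_mmu.gva_to_gpa = nonpaging_gva_to_gpa;
	vcpu.nested_mmu.gva_to_gpa = nonpaging_gva_to_gpa;
	vcpu.mmu = &vcpu.root_mmu;
	vcpu.walk_mmu = &vcpu.root_mmu;

	printf("gpa = 0x%llx\n",
	       (unsigned long long)demo_gva_to_gpa_read(&vcpu, 0x1000, NULL));
	return 0;
}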
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -426,8 +426,9 @@ struct kvm_mmu {
 	int (*page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
 	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
 				  struct x86_exception *fault);
-	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa,
-			    u32 access, struct x86_exception *exception);
+	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+			    gpa_t gva_or_gpa, u32 access,
+			    struct x86_exception *exception);
 	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
 			       struct x86_exception *exception);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3732,21 +3732,13 @@ void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
 	kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
 }
 
-static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
-				  u32 access, struct x86_exception *exception)
+static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+				  gpa_t vaddr, u32 access,
+				  struct x86_exception *exception)
 {
 	if (exception)
 		exception->error_code = 0;
-	return vaddr;
-}
-
-static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
-					 u32 access,
-					 struct x86_exception *exception)
-{
-	if (exception)
-		exception->error_code = 0;
-	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
+	return mmu->translate_gpa(vcpu, vaddr, access, exception);
 }
 
 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
@@ -5001,13 +4993,13 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
 	 */
 	if (!is_paging(vcpu))
-		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
+		g_context->gva_to_gpa = nonpaging_gva_to_gpa;
 	else if (is_long_mode(vcpu))
-		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
+		g_context->gva_to_gpa = paging64_gva_to_gpa;
 	else if (is_pae(vcpu))
-		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
+		g_context->gva_to_gpa = paging64_gva_to_gpa;
 	else
-		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
+		g_context->gva_to_gpa = paging32_gva_to_gpa;
 
 	reset_guest_paging_metadata(vcpu, g_context);
 }
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -547,16 +547,6 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 					access);
 }
 
-#if PTTYPE != PTTYPE_EPT
-static int FNAME(walk_addr_nested)(struct guest_walker *walker,
-				   struct kvm_vcpu *vcpu, gva_t addr,
-				   u32 access)
-{
-	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
-					addr, access);
-}
-#endif
-
 static bool
 FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
@@ -1000,14 +990,20 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 }
 
 /* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
-static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access,
+static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+			       gpa_t addr, u32 access,
 			       struct x86_exception *exception)
 {
 	struct guest_walker walker;
 	gpa_t gpa = UNMAPPED_GVA;
 	int r;
 
-	r = FNAME(walk_addr)(&walker, vcpu, addr, access);
+#ifndef CONFIG_X86_64
+	/* A 64-bit GVA should be impossible on 32-bit KVM. */
+	WARN_ON_ONCE((addr >> 32) && mmu == vcpu->arch.walk_mmu);
+#endif
+
+	r = FNAME(walk_addr_generic)(&walker, vcpu, mmu, addr, access);
 
 	if (r) {
 		gpa = gfn_to_gpa(walker.gfn);
@@ -1018,33 +1014,6 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access,
 	return gpa;
 }
 
-#if PTTYPE != PTTYPE_EPT
-/* Note, gva_to_gpa_nested() is only used to translate L2 GVAs. */
-static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
-				      u32 access,
-				      struct x86_exception *exception)
-{
-	struct guest_walker walker;
-	gpa_t gpa = UNMAPPED_GVA;
-	int r;
-
-#ifndef CONFIG_X86_64
-	/* A 64-bit GVA should be impossible on 32-bit KVM. */
-	WARN_ON_ONCE(vaddr >> 32);
-#endif
-
-	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
-
-	if (r) {
-		gpa = gfn_to_gpa(walker.gfn);
-		gpa |= vaddr & ~PAGE_MASK;
-	} else if (exception)
-		*exception = walker.fault;
-
-	return gpa;
-}
-#endif
-
 /*
  * Using the cached information from sp->gfns is safe because:
  * - The spte has a reference to the struct page, so the pfn for a given gfn
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6506,13 +6506,14 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
 			   struct x86_exception *exception)
 {
+	struct kvm_mmu *mmu = vcpu->arch.mmu;
 	gpa_t t_gpa;
 
 	BUG_ON(!mmu_is_nested(vcpu));
 
 	/* NPT walks are always user-walks */
 	access |= PFERR_USER_MASK;
-	t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception);
+	t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception);
 
 	return t_gpa;
 }
@@ -6520,25 +6521,31 @@ gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
 			      struct x86_exception *exception)
 {
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
 	u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
+	return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
 
 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
 			       struct x86_exception *exception)
 {
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
 	u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_FETCH_MASK;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
+	return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
 
 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
 			       struct x86_exception *exception)
 {
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
 	u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_WRITE_MASK;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
+	return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write);
 
@@ -6546,19 +6553,21 @@ EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write);
 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 				struct x86_exception *exception)
 {
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
+	return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception);
 }
 
 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 				      struct kvm_vcpu *vcpu, u32 access,
 				      struct x86_exception *exception)
 {
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
 
 	while (bytes) {
-		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
-							    exception);
+		gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
@@ -6586,13 +6595,14 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
 				struct x86_exception *exception)
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 	u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	unsigned offset;
 	int ret;
 
 	/* Inline kvm_read_guest_virt_helper for speed. */
-	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
-						    exception);
+	gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK,
+				    exception);
 	if (unlikely(gpa == UNMAPPED_GVA))
 		return X86EMUL_PROPAGATE_FAULT;
 
@@ -6651,13 +6661,12 @@ static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes
 				      struct kvm_vcpu *vcpu, u32 access,
 				      struct x86_exception *exception)
 {
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
 
 	while (bytes) {
-		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
-							    access,
-							    exception);
+		gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
@@ -6744,6 +6753,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 				gpa_t *gpa, struct x86_exception *exception,
 				bool write)
 {
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 	u32 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
 		| (write ? PFERR_WRITE_MASK : 0);
 
@@ -6761,7 +6771,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 		return 1;
 	}
 
-	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
+	*gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 
 	if (*gpa == UNMAPPED_GVA)
 		return -1;
@@ -12312,12 +12322,13 @@ EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
 
 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
 {
+	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 	struct x86_exception fault;
 	u32 access = error_code &
 		(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
 
 	if (!(error_code & PFERR_PRESENT_MASK) ||
-	    vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) {
+	    mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != UNMAPPED_GVA) {
 		/*
 		 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
 		 * tables probably do not match the TLB. Just proceed