KVM: nVMX: fix EPT permissions as reported in exit qualification

commit 0780516a18f87e881e42ed815f189279b0a1743c upstream.

This fixes the new ept_access_test_read_only and ept_access_test_read_write
testcases from vmx.flat.

The problem is that gpte_access moves bits around to switch from EPT
bit order (XWR) to ACC_*_MASK bit order (RWX).  This results in an
incorrect exit qualification.  To fix this, make pt_access and
pte_access operate on raw PTE values (only with NX flipped to mean
"can execute") and call gpte_access at the end of the walk.  This
lets us use pte_access to compute the exit qualification with XWR
bit order.
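
As an illustration of the accumulation trick (a minimal standalone
sketch, not the kernel code; walk_access and TOY_NX_MASK are invented
names for the example): XORing each PTE with the NX mask turns "NX set
means no-execute" into "bit set means can-execute", so the execute
permission can be folded into the walk with a plain AND like every
other permission bit:

	#include <stdint.h>
	#include <stdio.h>

	#define TOY_NX_MASK (1ULL << 63)	/* NX is bit 63, as in PT64_NX_SHIFT */

	/* Accumulate permissions across a walk: after the XOR a set bit
	 * always means "allowed", so AND keeps what every level permits. */
	static uint64_t walk_access(const uint64_t *ptes, int levels)
	{
		uint64_t pte_access = ~0ULL;
		for (int i = 0; i < levels; i++)
			pte_access &= ptes[i] ^ TOY_NX_MASK;
		return pte_access;	/* raw PTE bit order, NX still inverted */
	}

	int main(void)
	{
		/* Two-level walk: the upper entry allows execution (NX clear),
		 * the leaf forbids it (NX set). */
		uint64_t ptes[2] = { 0x7, 0x7 | TOY_NX_MASK };
		uint64_t raw = walk_access(ptes, 2);

		printf("can execute: %d\n", !!(raw & TOY_NX_MASK));	/* prints 0 */
		return 0;
	}

At the end of the walk the patch XORs walk_nx_mask back in before
calling gpte_access, restoring the architectural NX sense.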

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Xiao Guangrong <xiaoguangrong@tencent.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
[bwh: Backported to 4.9:
 - There's no support for EPT accessed/dirty bits, so do not use
   have_ad flag
 - Adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -285,9 +285,11 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	pt_element_t pte;
 	pt_element_t __user *uninitialized_var(ptep_user);
 	gfn_t table_gfn;
-	unsigned index, pt_access, pte_access, accessed_dirty, pte_pkey;
+	u64 pt_access, pte_access;
+	unsigned index, accessed_dirty, pte_pkey;
 	gpa_t pte_gpa;
 	int offset;
+	u64 walk_nx_mask = 0;
 	const int write_fault = access & PFERR_WRITE_MASK;
 	const int user_fault = access & PFERR_USER_MASK;
 	const int fetch_fault = access & PFERR_FETCH_MASK;
@@ -301,6 +303,7 @@ retry_walk:
 	pte           = mmu->get_cr3(vcpu);
 
 #if PTTYPE == 64
+	walk_nx_mask = 1ULL << PT64_NX_SHIFT;
 	if (walker->level == PT32E_ROOT_LEVEL) {
 		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
 		trace_kvm_mmu_paging_element(pte, walker->level);
@@ -312,15 +315,14 @@ retry_walk:
 	walker->max_level = walker->level;
 	ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));
 
-	accessed_dirty = PT_GUEST_ACCESSED_MASK;
-	pt_access = pte_access = ACC_ALL;
+	pte_access = ~0;
 	++walker->level;
 
 	do {
 		gfn_t real_gfn;
 		unsigned long host_addr;
 
-		pt_access &= pte_access;
+		pt_access = pte_access;
 		--walker->level;
 
 		index = PT_INDEX(addr, walker->level);
@@ -363,6 +365,12 @@ retry_walk:
 
 		trace_kvm_mmu_paging_element(pte, walker->level);
 
+		/*
+		 * Inverting the NX it lets us AND it like other
+		 * permission bits.
+		 */
+		pte_access = pt_access & (pte ^ walk_nx_mask);
+
 		if (unlikely(!FNAME(is_present_gpte)(pte)))
 			goto error;
 
@@ -371,14 +379,16 @@ retry_walk:
 			goto error;
 		}
 
 		accessed_dirty &= pte;
-		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
 
 		walker->ptes[walker->level - 1] = pte;
 	} while (!is_last_gpte(mmu, walker->level, pte));
 
 	pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
-	errcode = permission_fault(vcpu, mmu, pte_access, pte_pkey, access);
+	accessed_dirty = pte_access & PT_GUEST_ACCESSED_MASK;
+
+	/* Convert to ACC_*_MASK flags for struct guest_walker. */
+	walker->pt_access = FNAME(gpte_access)(vcpu, pt_access ^ walk_nx_mask);
+	walker->pte_access = FNAME(gpte_access)(vcpu, pte_access ^ walk_nx_mask);
+	errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
 	if (unlikely(errcode))
 		goto error;
 
@@ -395,7 +405,7 @@ retry_walk:
 	walker->gfn = real_gpa >> PAGE_SHIFT;
 
 	if (!write_fault)
-		FNAME(protect_clean_gpte)(&pte_access, pte);
+		FNAME(protect_clean_gpte)(&walker->pte_access, pte);
 	else
 		/*
 		 * On a write fault, fold the dirty bit into accessed_dirty.
@@ -413,10 +423,8 @@ retry_walk:
 		goto retry_walk;
 	}
 
-	walker->pt_access = pt_access;
-	walker->pte_access = pte_access;
 	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
-		 __func__, (u64)pte, pte_access, pt_access);
+		 __func__, (u64)pte, walker->pte_access, walker->pt_access);
 	return 1;
 
 error:
@@ -444,7 +452,7 @@ error:
 	 */
 	if (!(errcode & PFERR_RSVD_MASK)) {
 		vcpu->arch.exit_qualification &= 0x187;
-		vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3;
+		vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
 	}
 #endif
 	walker->fault.address = addr;
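
The last hunk is the user-visible fix: bits 5:3 of an EPT-violation
exit qualification report whether the faulting GPA was readable,
writable and executable, in the PTE's native bit order (bit 0 = R,
bit 1 = W, bit 2 = X). The old code ANDed an ACC_*-ordered value with
a raw PTE, mixing the two bit orders. A minimal sketch of the
difference for a read-only mapping (the constants mirror KVM's and the
SDM layout, but the program is illustrative, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	#define EPT_R		(1u << 0)	/* EPT PTE: bit 0 = read */
	#define ACC_USER_MASK	(1u << 2)	/* ACC_* order: bit 2 = user/read */

	int main(void)
	{
		uint32_t pte = EPT_R;			/* read-only EPT mapping */
		uint32_t pt_access = ACC_USER_MASK;	/* same permission, ACC_* order */

		/* Old: ACC_* order ANDed with raw order loses the R bit, so
		 * the exit qualification claims the GPA was not readable. */
		uint32_t old_qual = ((pt_access & pte) & 0x7) << 3;

		/* New: pte_access stays in raw PTE order, so bits 5:3 come
		 * out right: "GPA was readable". */
		uint32_t new_qual = (pte & 0x7) << 3;

		printf("old %#x, new %#x\n", old_qual, new_qual); /* old 0, new 0x8 */
		return 0;
	}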