KVM: x86/mmu: Track write/user faults using bools
Use bools to track write and user faults throughout the page fault paths and down into mmu_set_spte(). The actual usage is purely boolean, but that's not obvious without digging into all paths, as the current code uses a mix of bools (TDP and try_async_pf) and ints (shadow paging and mmu_set_spte()).

No true functional change intended (although the pgprintk() will now print 0/1 instead of 0/PFERR_WRITE_MASK).

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923183735.584-9-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
dcc7065170
commit
e88b809369
@@ -3082,7 +3082,7 @@ set_pte:
 }
 
 static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-			unsigned int pte_access, int write_fault, int level,
+			unsigned int pte_access, bool write_fault, int level,
 			gfn_t gfn, kvm_pfn_t pfn, bool speculative,
 			bool host_writable)
 {
@@ -3188,7 +3188,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 		return -1;
 
 	for (i = 0; i < ret; i++, gfn++, start++) {
-		mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn,
+		mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
			     page_to_pfn(pages[i]), true, true);
		put_page(pages[i]);
	}
@@ -550,7 +550,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
	 * we call mmu_set_spte() with host_writable = true because
	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
	 */
-	mmu_set_spte(vcpu, spte, pte_access, 0, PG_LEVEL_4K, gfn, pfn,
+	mmu_set_spte(vcpu, spte, pte_access, false, PG_LEVEL_4K, gfn, pfn,
		     true, true);
 
	kvm_release_pfn_clean(pfn);
@@ -630,7 +630,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
			 bool prefault)
 {
	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
-	int write_fault = error_code & PFERR_WRITE_MASK;
+	bool write_fault = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;
	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
	struct kvm_mmu_page *sp = NULL;
@@ -746,7 +746,7 @@ out_gpte_changed:
  */
 static bool
 FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
-			      struct guest_walker *walker, int user_fault,
+			      struct guest_walker *walker, bool user_fault,
			      bool *write_fault_to_shadow_pgtable)
 {
	int level;
@@ -784,8 +784,8 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
 static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
			     bool prefault)
 {
-	int write_fault = error_code & PFERR_WRITE_MASK;
-	int user_fault = error_code & PFERR_USER_MASK;
+	bool write_fault = error_code & PFERR_WRITE_MASK;
+	bool user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	int r;
	kvm_pfn_t pfn;
|
Loading…
x
Reference in New Issue
Block a user