KVM: x86/mmu: Move the code out of FNAME(sync_page)'s loop body into mmu.c
Rename mmu->sync_page to mmu->sync_spte and move the code out of
FNAME(sync_page)'s loop body into mmu.c.

No functional change intended.

Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Link: https://lore.kernel.org/r/20230216154115.710033-6-jiangshanlai@gmail.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
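For orientation, below is a minimal, self-contained sketch of the division of labour this patch introduces: the generic MMU code walks every SPTE index and aggregates the per-entry results of the paging-mode hook, while the hook syncs exactly one entry. This is hosted C, not kernel code; the toy_* names, the 512-entry table size, and the stub behaviour are illustrative only, but the return-value convention and the aggregation loop mirror the diff below.

#include <stdio.h>

#define SPTE_ENT_PER_PAGE 512

struct toy_sp {
	unsigned long spt[SPTE_ENT_PER_PAGE];	/* stand-in for a shadow page table */
};

struct toy_mmu {
	/*
	 * Per-entry hook, mirroring the new mmu->sync_spte contract:
	 *   < 0: failed to sync the spte
	 *   0:   synced, no TLB flush required
	 *   > 0: synced, TLB flush required
	 */
	int (*sync_spte)(struct toy_sp *sp, int i);
};

/* Stand-in for FNAME(sync_spte): handle a single shadow PTE. */
static int toy_sync_spte(struct toy_sp *sp, int i)
{
	if (!sp->spt[i])
		return 0;	/* nothing shadowed at this index */
	return 1;		/* pretend the entry changed, so a flush is needed */
}

/*
 * Stand-in for the new __kvm_sync_page(): same aggregation as the diff.
 * A -1 failure from the hook propagates through the bitwise OR, so the
 * final result goes negative and the caller knows to zap the page.
 */
static int toy_sync_page(struct toy_mmu *mmu, struct toy_sp *sp)
{
	int flush = 0;
	int i;

	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
		int ret = mmu->sync_spte(sp, i);

		if (ret < -1)
			return -1;
		flush |= ret;
	}
	return flush;
}

int main(void)
{
	struct toy_mmu mmu = { .sync_spte = toy_sync_spte };
	struct toy_sp sp = { .spt = { [3] = 0x1 } };

	printf("sync result: %d\n", toy_sync_page(&mmu, &sp));	/* prints 1: flush needed */
	return 0;
}

The affected files are arch/x86/include/asm/kvm_host.h (callback signature), arch/x86/kvm/mmu/mmu.c (the new generic loop and the hook assignments), and arch/x86/kvm/mmu/paging_tmpl.h (the per-entry FNAME(sync_spte)).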
parent 8ef228c20c
commit c3c6c9fc5d
arch/x86/include/asm/kvm_host.h
@@ -439,8 +439,8 @@ struct kvm_mmu {
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gpa_t gva_or_gpa, u64 access,
 			    struct x86_exception *exception);
-	int (*sync_page)(struct kvm_vcpu *vcpu,
-			 struct kvm_mmu_page *sp);
+	int (*sync_spte)(struct kvm_vcpu *vcpu,
+			 struct kvm_mmu_page *sp, int i);
 	void (*invlpg)(struct kvm_vcpu *vcpu, u64 addr, hpa_t root_hpa);
 	struct kvm_mmu_root_info root;
 	union kvm_cpu_role cpu_role;
arch/x86/kvm/mmu/mmu.c
@@ -1934,7 +1934,7 @@ static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	 * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
 	 * reserved bits checks will be wrong, etc...
 	 */
-	if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_page ||
+	if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
 			 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
 		return false;
 
@@ -1943,10 +1943,30 @@ static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
+	int flush = 0;
+	int i;
+
 	if (!kvm_sync_page_check(vcpu, sp))
 		return -1;
 
-	return vcpu->arch.mmu->sync_page(vcpu, sp);
+	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
+		int ret = vcpu->arch.mmu->sync_spte(vcpu, sp, i);
+
+		if (ret < -1)
+			return -1;
+		flush |= ret;
+	}
+
+	/*
+	 * Note, any flush is purely for KVM's correctness, e.g. when dropping
+	 * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
+	 * unmap or dirty logging event doesn't fail to flush. The guest is
+	 * responsible for flushing the TLB to ensure any changes in protection
+	 * bits are recognized, i.e. until the guest flushes or page faults on
+	 * a relevant address, KVM is architecturally allowed to let vCPUs use
+	 * cached translations with the old protection bits.
+	 */
+	return flush;
 }
 
 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
@@ -4504,7 +4524,7 @@ static void nonpaging_init_context(struct kvm_mmu *context)
 {
 	context->page_fault = nonpaging_page_fault;
 	context->gva_to_gpa = nonpaging_gva_to_gpa;
-	context->sync_page = NULL;
+	context->sync_spte = NULL;
 	context->invlpg = NULL;
 }
 
@@ -5095,7 +5115,7 @@ static void paging64_init_context(struct kvm_mmu *context)
 {
 	context->page_fault = paging64_page_fault;
 	context->gva_to_gpa = paging64_gva_to_gpa;
-	context->sync_page = paging64_sync_page;
+	context->sync_spte = paging64_sync_spte;
 	context->invlpg = paging64_invlpg;
 }
 
@@ -5103,7 +5123,7 @@ static void paging32_init_context(struct kvm_mmu *context)
 {
 	context->page_fault = paging32_page_fault;
 	context->gva_to_gpa = paging32_gva_to_gpa;
-	context->sync_page = paging32_sync_page;
+	context->sync_spte = paging32_sync_spte;
 	context->invlpg = paging32_invlpg;
 }
 
@@ -5192,7 +5212,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 	context->cpu_role.as_u64 = cpu_role.as_u64;
 	context->root_role.word = root_role.word;
 	context->page_fault = kvm_tdp_page_fault;
-	context->sync_page = NULL;
+	context->sync_spte = NULL;
 	context->invlpg = NULL;
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
@@ -5324,7 +5344,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 
 	context->page_fault = ept_page_fault;
 	context->gva_to_gpa = ept_gva_to_gpa;
-	context->sync_page = ept_sync_page;
+	context->sync_spte = ept_sync_spte;
 	context->invlpg = ept_invlpg;
 
 	update_permission_bitmask(context, true);
arch/x86/kvm/mmu/paging_tmpl.h
@@ -937,87 +937,67 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
  * can't change unless all sptes pointing to it are nuked first.
  *
  * Returns
- *   < 0: the sp should be zapped
- *   0: the sp is synced and no tlb flushing is required
- *   > 0: the sp is synced and tlb flushing is required
+ *   < 0: failed to sync spte
+ *   0: the spte is synced and no tlb flushing is required
+ *   > 0: the spte is synced and tlb flushing is required
  */
-static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
 {
-	int i;
 	bool host_writable;
 	gpa_t first_pte_gpa;
-	bool flush = false;
+	u64 *sptep, spte;
+	struct kvm_memory_slot *slot;
+	unsigned pte_access;
+	pt_element_t gpte;
+	gpa_t pte_gpa;
+	gfn_t gfn;
+
+	if (!sp->spt[i])
+		return 0;
 
 	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
+	pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
 
-	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
-		u64 *sptep, spte;
-		struct kvm_memory_slot *slot;
-		unsigned pte_access;
-		pt_element_t gpte;
-		gpa_t pte_gpa;
-		gfn_t gfn;
+	if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
+				       sizeof(pt_element_t)))
+		return -1;
 
-		if (!sp->spt[i])
-			continue;
+	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte))
+		return 1;
 
-		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
+	gfn = gpte_to_gfn(gpte);
+	pte_access = sp->role.access;
+	pte_access &= FNAME(gpte_access)(gpte);
+	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
 
-		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
-					       sizeof(pt_element_t)))
-			return -1;
-
-		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-			flush = true;
-			continue;
-		}
-
-		gfn = gpte_to_gfn(gpte);
-		pte_access = sp->role.access;
-		pte_access &= FNAME(gpte_access)(gpte);
-		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
-
-		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
-			continue;
-
-		/*
-		 * Drop the SPTE if the new protections would result in a RWX=0
-		 * SPTE or if the gfn is changing. The RWX=0 case only affects
-		 * EPT with execute-only support, i.e. EPT without an effective
-		 * "present" bit, as all other paging modes will create a
-		 * read-only SPTE if pte_access is zero.
-		 */
-		if ((!pte_access && !shadow_present_mask) ||
-		    gfn != kvm_mmu_page_get_gfn(sp, i)) {
-			drop_spte(vcpu->kvm, &sp->spt[i]);
-			flush = true;
-			continue;
-		}
-
-		/* Update the shadowed access bits in case they changed. */
-		kvm_mmu_page_set_access(sp, i, pte_access);
-
-		sptep = &sp->spt[i];
-		spte = *sptep;
-		host_writable = spte & shadow_host_writable_mask;
-		slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-		make_spte(vcpu, sp, slot, pte_access, gfn,
-			  spte_to_pfn(spte), spte, true, false,
-			  host_writable, &spte);
-
-		flush |= mmu_spte_update(sptep, spte);
-	}
+	if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
+		return 0;
 
 	/*
-	 * Note, any flush is purely for KVM's correctness, e.g. when dropping
-	 * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
-	 * unmap or dirty logging event doesn't fail to flush. The guest is
-	 * responsible for flushing the TLB to ensure any changes in protection
-	 * bits are recognized, i.e. until the guest flushes or page faults on
-	 * a relevant address, KVM is architecturally allowed to let vCPUs use
-	 * cached translations with the old protection bits.
+	 * Drop the SPTE if the new protections would result in a RWX=0
+	 * SPTE or if the gfn is changing. The RWX=0 case only affects
+	 * EPT with execute-only support, i.e. EPT without an effective
+	 * "present" bit, as all other paging modes will create a
+	 * read-only SPTE if pte_access is zero.
	 */
-	return flush;
+	if ((!pte_access && !shadow_present_mask) ||
+	    gfn != kvm_mmu_page_get_gfn(sp, i)) {
+		drop_spte(vcpu->kvm, &sp->spt[i]);
+		return 1;
+	}
+
+	/* Update the shadowed access bits in case they changed. */
+	kvm_mmu_page_set_access(sp, i, pte_access);
+
+	sptep = &sp->spt[i];
+	spte = *sptep;
+	host_writable = spte & shadow_host_writable_mask;
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	make_spte(vcpu, sp, slot, pte_access, gfn,
+		  spte_to_pfn(spte), spte, true, false,
+		  host_writable, &spte);
+
+	return mmu_spte_update(sptep, spte);
 }
 
 #undef pt_element_t