KVM: x86/mmu: Add optimized helper to retrieve an SPTE's index
Add spte_index() to dedup all the code that calculates an SPTE's index
into its parent's page table and/or spt array.  Opportunistically tweak
the calculation to avoid pointer arithmetic, which is subtle (subtract
in 8-byte chunks) and less performant (requires the compiler to generate
the subtraction).

Suggested-by: David Matlack <dmatlack@google.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220712020724.1262121-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 79e48cec6c
parent b184b35d06
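In short: the old code computed an SPTE's index by subtracting the table base
pointer (spte - sp->spt), while the new spte_index() masks the SPTE's own
virtual address, which works because each shadow page table occupies exactly
one page-aligned page.  A minimal standalone sketch of the two calculations
(SPTE_ENT_PER_PAGE and the simulated table below are stand-ins, not the
kernel's code):

/* Standalone sketch: contrast the two index calculations. */
#include <stdint.h>
#include <stdio.h>

#define SPTE_ENT_PER_PAGE 512	/* 4096-byte page / 8-byte SPTE */

/* Old style: pointer subtraction; the compiler must emit a subtract
 * plus a divide-by-8 (shift), and the table base pointer is required.
 */
static int index_by_subtraction(uint64_t *sptep, uint64_t *spt)
{
	return sptep - spt;
}

/* New style: the table is one page-aligned page, so the low bits of
 * the SPTE's own address encode its index; no base pointer needed.
 */
static int index_by_mask(uint64_t *sptep)
{
	return ((unsigned long)sptep / sizeof(*sptep)) & (SPTE_ENT_PER_PAGE - 1);
}

int main(void)
{
	/* Simulate a page-aligned shadow page table. */
	_Alignas(4096) static uint64_t spt[SPTE_ENT_PER_PAGE];
	uint64_t *sptep = &spt[137];

	/* Both calculations print 137. */
	printf("%d %d\n", index_by_subtraction(sptep, spt), index_by_mask(sptep));
	return 0;
}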
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1038,7 +1038,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	struct kvm_rmap_head *rmap_head;
 
 	sp = sptep_to_sp(spte);
-	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
+	gfn = kvm_mmu_page_get_gfn(sp, spte_index(spte));
 
 	/*
 	 * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
@@ -1589,7 +1589,7 @@ static void __rmap_add(struct kvm *kvm,
 	int rmap_count;
 
 	sp = sptep_to_sp(spte);
-	kvm_mmu_page_set_translation(sp, spte - sp->spt, gfn, access);
+	kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access);
 	kvm_update_page_stats(kvm, sp->role.level, 1);
 
 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
@@ -1716,11 +1716,9 @@ static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
 static void mark_unsync(u64 *spte)
 {
 	struct kvm_mmu_page *sp;
-	unsigned int index;
 
 	sp = sptep_to_sp(spte);
-	index = spte - sp->spt;
-	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
+	if (__test_and_set_bit(spte_index(spte), sp->unsync_child_bitmap))
 		return;
 	if (sp->unsync_children++)
 		return;
@@ -2203,7 +2201,7 @@ static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct, unsig
 	 */
 	if (role.has_4_byte_gpte) {
 		WARN_ON_ONCE(role.level != PG_LEVEL_4K);
-		role.quadrant = (sptep - parent_sp->spt) % 2;
+		role.quadrant = spte_index(sptep) & 1;
 	}
 
 	return role;
@@ -2828,7 +2826,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 		rmap_add(vcpu, slot, sptep, gfn, pte_access);
 	} else {
 		/* Already rmapped but the pte_access bits may have changed. */
-		kvm_mmu_page_set_access(sp, sptep - sp->spt, pte_access);
+		kvm_mmu_page_set_access(sp, spte_index(sptep), pte_access);
 	}
 
 	return ret;
@@ -2844,7 +2842,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 	int i, ret;
 	gfn_t gfn;
 
-	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
+	gfn = kvm_mmu_page_get_gfn(sp, spte_index(start));
 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
 	if (!slot)
 		return -1;
@@ -2870,7 +2868,7 @@ static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
 
 	WARN_ON(!sp->role.direct);
 
-	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
+	i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1);
 	spte = sp->spt + i;
 
 	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
@@ -6156,8 +6154,8 @@ static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *hu
 	unsigned int access;
 	gfn_t gfn;
 
-	gfn = kvm_mmu_page_get_gfn(huge_sp, huge_sptep - huge_sp->spt);
-	access = kvm_mmu_page_get_access(huge_sp, huge_sptep - huge_sp->spt);
+	gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
+	access = kvm_mmu_page_get_access(huge_sp, spte_index(huge_sptep));
 
 	/*
 	 * Note, huge page splitting always uses direct shadow pages, regardless
@@ -6231,7 +6229,7 @@ static int shadow_mmu_try_split_huge_page(struct kvm *kvm,
 	u64 spte;
 
 	/* Grab information for the tracepoint before dropping the MMU lock. */
-	gfn = kvm_mmu_page_get_gfn(huge_sp, huge_sptep - huge_sp->spt);
+	gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
 	level = huge_sp->role.level;
 	spte = *huge_sptep;
 
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -595,7 +595,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 	if (sp->role.direct)
 		return __direct_pte_prefetch(vcpu, sp, sptep);
 
-	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
+	i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1);
 	spte = sp->spt + i;
 
 	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
@@ -933,7 +933,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 			break;
 
 		pte_gpa = FNAME(get_level1_sp_gpa)(sp);
-		pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+		pte_gpa += spte_index(sptep) * sizeof(pt_element_t);
 
 		mmu_page_zap_pte(vcpu->kvm, sp, sptep, NULL);
 		if (is_shadow_present_pte(old_spte))
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -190,6 +190,12 @@ static inline bool is_removed_spte(u64 spte)
 	return spte == REMOVED_SPTE;
 }
 
+/* Get an SPTE's index into its parent's page table (and the spt array). */
+static inline int spte_index(u64 *sptep)
+{
+	return ((unsigned long)sptep / sizeof(*sptep)) & (SPTE_ENT_PER_PAGE - 1);
+}
+
 /*
  * In some cases, we need to preserve the GFN of a non-present or reserved
  * SPTE when we usurp the upper five bits of the physical address space to
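For intuition, a worked example of the new helper (the address is
hypothetical): with 8-byte SPTEs and 4 KiB page tables,
SPTE_ENT_PER_PAGE - 1 = 511, so for sptep = 0xffff888012345448:

	sptep / 8            = 0x1ffff11002468a89	(address in 8-byte units)
	0x...468a89 & 511    = 0x89 = 137		(index within the page)

The subtraction-based spte - sp->spt yields the same 137, but only after
loading the base pointer and emitting the subtraction; the mask needs
nothing beyond the SPTE's own address, relying on sp->spt always being a
single page-aligned page.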