KVM: x86/mmu: Merge all handle_changed_pte*() functions
Merge __handle_changed_spte() and handle_changed_spte_acc_track() into a single function, handle_changed_spte(), as the two are always used together. Remove the existing handle_changed_spte(), as it's just a wrapper that calls __handle_changed_spte() and handle_changed_spte_acc_track().

Signed-off-by: Vipin Sharma <vipinsh@google.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
Reviewed-by: David Matlack <dmatlack@google.com>
[sean: massage changelog]
Link: https://lore.kernel.org/r/20230321220021.2119033-14-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 1f9973456e
commit 40fa907e5a
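For orientation before the hunks: a condensed sketch of what the merge produces, drawn entirely from the diff below. The bulk of handle_changed_spte()'s bookkeeping, and the locals it computes (was_leaf, is_present, pfn_changed), are elided here; the access-tracking check that used to live in handle_changed_spte_acc_track() simply becomes the tail of handle_changed_spte(), so callers such as tdp_mmu_set_spte_atomic() and tdp_mmu_set_spte() drop their second call.

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                u64 old_spte, u64 new_spte, int level,
                                bool shared)
{
        /* ... existing present/leaf/PFN bookkeeping, unchanged by this patch ... */

        /* Folded in from the old handle_changed_spte_acc_track(): */
        if (was_leaf && is_accessed_spte(old_spte) &&
            (!is_present || !is_accessed_spte(new_spte) || pfn_changed))
                kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}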
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -334,17 +334,6 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                 u64 old_spte, u64 new_spte, int level,
                                 bool shared);
 
-static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
-{
-        if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
-                return;
-
-        if (is_accessed_spte(old_spte) &&
-            (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
-             spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
-                kvm_set_pfn_accessed(spte_to_pfn(old_spte));
-}
-
 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
         kvm_account_pgtable_pages((void *)sp->spt, +1);
@@ -487,7 +476,7 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
 }
 
 /**
- * __handle_changed_spte - handle bookkeeping associated with an SPTE change
+ * handle_changed_spte - handle bookkeeping associated with an SPTE change
  * @kvm: kvm instance
  * @as_id: the address space of the paging structure the SPTE was a part of
  * @gfn: the base GFN that was mapped by the SPTE
@@ -502,9 +491,9 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
  * dirty logging updates are handled in common code, not here (see make_spte()
  * and fast_pf_fix_direct_spte()).
  */
-static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
-                                  u64 old_spte, u64 new_spte, int level,
-                                  bool shared)
+static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
+                                u64 old_spte, u64 new_spte, int level,
+                                bool shared)
 {
         bool was_present = is_shadow_present_pte(old_spte);
         bool is_present = is_shadow_present_pte(new_spte);
@@ -588,15 +577,10 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
         if (was_present && !was_leaf &&
             (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
                 handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
-}
 
-static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
-                                u64 old_spte, u64 new_spte, int level,
-                                bool shared)
-{
-        __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
-                              shared);
-        handle_changed_spte_acc_track(old_spte, new_spte, level);
+        if (was_leaf && is_accessed_spte(old_spte) &&
+            (!is_present || !is_accessed_spte(new_spte) || pfn_changed))
+                kvm_set_pfn_accessed(spte_to_pfn(old_spte));
 }
 
 /*
@@ -639,9 +623,8 @@ static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
         if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
                 return -EBUSY;
 
-        __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
-                              new_spte, iter->level, true);
-        handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);
+        handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
+                            new_spte, iter->level, true);
 
         return 0;
 }
@@ -705,8 +688,7 @@ static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
 
         old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
 
-        __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
-        handle_changed_spte_acc_track(old_spte, new_spte, level);
+        handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
         return old_spte;
 }
 
@@ -1275,7 +1257,7 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
          * Note, when changing a read-only SPTE, it's not strictly necessary to
          * zero the SPTE before setting the new PFN, but doing so preserves the
          * invariant that the PFN of a present * leaf SPTE can never change.
-         * See __handle_changed_spte().
+         * See handle_changed_spte().
          */
         tdp_mmu_iter_set_spte(kvm, iter, 0);
 
@@ -1300,7 +1282,7 @@ bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
         /*
          * No need to handle the remote TLB flush under RCU protection, the
          * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
-         * shadow page. See the WARN on pfn_changed in __handle_changed_spte().
+         * shadow page. See the WARN on pfn_changed in handle_changed_spte().
          */
         return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
 }