Pass the address space ID to TDP MMU's primary "zap gfn range" helper to allow the MMU notifier paths to iterate over memslots exactly once. Currently, both the legacy MMU and TDP MMU iterate over memslots when looking for an overlapping hva range, which can be quite costly if there are a large number of memslots.

Add a "flush" parameter so that iterating over multiple address spaces in the caller will continue to do the right thing when yielding while a flush is pending from a previous address space.

Note, this also has a functional change in the form of coalescing TLB flushes across multiple address spaces in kvm_zap_gfn_range(), and also optimizes the TDP MMU to utilize range-based flushing when running as L1 with Hyper-V enlightenments.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210326021957.1424875-6-seanjc@google.com>
[Keep separate for loops to prepare for other incoming patches. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
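To illustrate the calling convention this introduces, here is a minimal, hedged sketch of a caller that zaps a gfn range in every address space. The wrapper name example_zap_gfn_range_all_as and the exact locking/flush placement are assumptions for illustration only, not code from this patch; the point is that the "flush" result of one address space is fed into the next call, so a flush left pending when an earlier zap yields is not lost, and a single remote TLB flush is issued at the end.

/*
 * Illustrative sketch only: zap [gfn_start, gfn_end) in all address spaces,
 * chaining the pending-flush state across them and flushing once at the end.
 */
static void example_zap_gfn_range_all_as(struct kvm *kvm, gfn_t gfn_start,
					 gfn_t gfn_end)
{
	bool flush = false;
	int i;

	write_lock(&kvm->mmu_lock);

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start, gfn_end,
						  flush);

	if (flush)
		kvm_flush_remote_tlbs(kvm);

	write_unlock(&kvm->mmu_lock);
}

The header below declares the helpers used in the sketch.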
// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);

bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush);
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
					     gfn_t start, gfn_t end, bool flush)
{
	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
}
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);

	/*
	 * Don't allow yielding, as the caller may have a flush pending. Note,
	 * if mmu_lock is held for write, zapping will never yield in this case,
	 * but explicitly disallow it for safety. The TDP MMU does not yield
	 * until it has made forward progress (steps sideways), and when zapping
	 * a single shadow page that it's guaranteed to see (thus the mmu_lock
	 * requirement), its "step sideways" will always step beyond the bounds
	 * of the shadow page's gfn range and stop iterating before yielding.
	 */
	lockdep_assert_held_write(&kvm->mmu_lock);
	return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
					   sp->gfn, end, false, false);
}
void kvm_tdp_mmu_zap_all(struct kvm *kvm);

int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault);

int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
			      unsigned long end);

int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
			      unsigned long end);
int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva);

int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
			     pte_t *host_ptep);

bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       struct kvm_memory_slot *slot, bool flush);

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn);

int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);

#ifdef CONFIG_X86_64
void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
#else
static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
#endif

static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
{
	struct kvm_mmu_page *sp;

	if (!is_tdp_mmu_enabled(kvm))
		return false;
	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	sp = to_shadow_page(hpa);
	if (WARN_ON(!sp))
		return false;

	return is_tdp_mmu_page(sp) && sp->root_count;
}

#endif /* __KVM_X86_MMU_TDP_MMU_H */
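Likewise, a hedged sketch of how kvm_tdp_mmu_zap_sp() is meant to be used; the function example_zap_one_sp is hypothetical and not part of this patch. Consistent with the comment in the header above, the helper is called with mmu_lock held for write, never yields, and leaves the TLB flush to the caller, which decides when to flush based on the returned value.

/*
 * Illustrative sketch only: zap a single TDP MMU shadow page under the write
 * lock.  kvm_tdp_mmu_zap_sp() neither yields nor flushes; the caller performs
 * the flush if the helper reports that one is needed.
 */
static void example_zap_one_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	bool flush = false;

	write_lock(&kvm->mmu_lock);

	if (is_tdp_mmu_page(sp))
		flush = kvm_tdp_mmu_zap_sp(kvm, sp);

	if (flush)
		kvm_flush_remote_tlbs(kvm);

	write_unlock(&kvm->mmu_lock);
}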