KVM: x86/mmu: Move TDP MMU VM init/uninit behind tdp_mmu_enabled
Move kvm_mmu_{init,uninit}_tdp_mmu() behind tdp_mmu_enabled. This makes
these functions consistent with the rest of the calls into the TDP MMU
from mmu.c, and is now possible because tdp_mmu_enabled is only modified
when the x86 vendor module is loaded, i.e. it will never change during
the lifetime of a VM.

This change also allows removing the stub definitions for 32-bit KVM, as
the compiler will simply optimize the calls out, just as it does for all
the other TDP MMU functions.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Reviewed-by: Isaku Yamahata <isaku.yamahata@intel.com>
Message-Id: <20220921173546.2674386-3-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
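The pattern relies on the 32-bit definition of tdp_mmu_enabled folding to a constant. A minimal sketch of the assumed shape of that definition (illustrative, not copied from the tree):

/*
 * Sketch only: on 64-bit builds the flag is assumed to be a real bool
 * fixed at vendor-module load time; on 32-bit it is assumed to be a
 * compile-time constant, so "if (tdp_mmu_enabled)" is dead code there.
 */
#ifdef CONFIG_X86_64
extern bool tdp_mmu_enabled;
#else
#define tdp_mmu_enabled false
#endif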
commit 991c8047b7
parent 3af15ff47c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6035,9 +6035,11 @@ int kvm_mmu_init_vm(struct kvm *kvm)
         INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
         spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
 
-        r = kvm_mmu_init_tdp_mmu(kvm);
-        if (r < 0)
-                return r;
+        if (tdp_mmu_enabled) {
+                r = kvm_mmu_init_tdp_mmu(kvm);
+                if (r < 0)
+                        return r;
+        }
 
         node->track_write = kvm_mmu_pte_write;
         node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
@@ -6067,7 +6069,8 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
 
         kvm_page_track_unregister_notifier(kvm, node);
 
-        kvm_mmu_uninit_tdp_mmu(kvm);
+        if (tdp_mmu_enabled)
+                kvm_mmu_uninit_tdp_mmu(kvm);
 
         mmu_free_vm_memory_caches(kvm);
 }
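Why the call-site check is enough on 32-bit: once the guard condition is a compile-time zero, an optimizing build (the kernel always compiles with optimization enabled) deletes the guarded call and the symbol reference along with it. A standalone sketch with hypothetical names, not KVM code:

/*
 * Standalone illustration of the dead-code-elimination argument.
 * backend_enabled stands in for tdp_mmu_enabled on a 32-bit build.
 */
#define backend_enabled 0

extern int init_backend(void);  /* hypothetical: declared but never defined */

int init_vm(void)
{
        int r;

        if (backend_enabled) {          /* constant-false: block is dropped */
                r = init_backend();     /* neither call nor link reference survives */
                if (r < 0)
                        return r;
        }
        return 0;
}

Compiled with -O2, this links even though init_backend() has no definition anywhere, which is exactly what makes the 32-bit stubs below removable.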
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -15,9 +15,6 @@ int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
 {
         struct workqueue_struct *wq;
 
-        if (!tdp_mmu_enabled)
-                return 0;
-
         wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
         if (!wq)
                 return -ENOMEM;
@@ -42,9 +39,6 @@ static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
 
 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
 {
-        if (!tdp_mmu_enabled)
-                return;
-
         /* Also waits for any queued work items. */
         destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
 
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -7,6 +7,9 @@
 
 #include "spte.h"
 
+int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
+
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
 
 __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
@@ -68,8 +71,6 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
                          u64 *spte);
 
 #ifdef CONFIG_X86_64
-int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
-void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
 
 static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
@@ -89,8 +90,6 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
         return sp && is_tdp_mmu_page(sp) && sp->root_count;
 }
 #else
-static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; }
-static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
 static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
 #endif
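With the prototypes now declared unconditionally at the top of the header, and the guarded calls in mmu.c compiled out on 32-bit (see the standalone sketch above), the #else stubs have no remaining callers to satisfy, so bare declarations suffice and the 32-bit block shrinks to just the is_tdp_mmu_page()/is_tdp_mmu() helpers.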