KVM: MMU: propagate alloc_workqueue failure

If kvm->arch.tdp_mmu_zap_wq cannot be created, the failure has
to be propagated up to kvm_mmu_init_vm and kvm_arch_init_vm.
kvm_arch_init_vm also has to undo the initialization performed so far,
so group all the MMU initialization code at the beginning and clean up
after kvm_page_track_init if kvm_mmu_init_vm fails.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author: Paolo Bonzini <pbonzini@redhat.com>
Date:   2022-03-25 12:42:52 -04:00
Commit: a1a39128fa (parent: b1e34d3253)

5 changed files with 32 additions and 17 deletions
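
For reference, the propagate-and-unwind shape this change gives kvm_arch_init_vm can be shown outside the kernel tree. Below is a minimal, self-contained C sketch of the same pattern; the stub functions (page_track_init, mmu_init, and so on) are hypothetical stand-ins for their kvm_* counterparts, with mmu_init hard-coded to fail the way a failed alloc_workqueue() would:

    #include <errno.h>
    #include <stdio.h>

    static int page_track_init(void) { return 0; }  /* stub: succeeds */
    static void page_track_cleanup(void) { }        /* stub */
    static int mmu_init(void) { return -ENOMEM; }   /* stub: simulates
                                                     * alloc_workqueue() failing */

    static int arch_init_vm(void)
    {
        int ret;

        ret = page_track_init();
        if (ret)
            goto out;

        /* MMU init runs early, so a failure here only has to undo
         * page-track init rather than every later setup step. */
        ret = mmu_init();
        if (ret)
            goto out_page_track;

        /* ... the rest of VM initialization would follow ... */
        return 0;

    out_page_track:
        page_track_cleanup();
    out:
        return ret;
    }

    int main(void)
    {
        /* Prints a negative errno, typically -12 (ENOMEM) on Linux. */
        printf("arch_init_vm() = %d\n", arch_init_vm());
        return 0;
    }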

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h

@@ -1584,7 +1584,7 @@ void kvm_mmu_module_exit(void);
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
-void kvm_mmu_init_vm(struct kvm *kvm);
+int kvm_mmu_init_vm(struct kvm *kvm);
 void kvm_mmu_uninit_vm(struct kvm *kvm);
 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c

@@ -5768,17 +5768,24 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
 	kvm_mmu_zap_all_fast(kvm);
 }
 
-void kvm_mmu_init_vm(struct kvm *kvm)
+int kvm_mmu_init_vm(struct kvm *kvm)
 {
 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+	int r;
 
+	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
+	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
+	INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
 
-	kvm_mmu_init_tdp_mmu(kvm);
+	r = kvm_mmu_init_tdp_mmu(kvm);
+	if (r < 0)
+		return r;
 
 	node->track_write = kvm_mmu_pte_write;
 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
 	kvm_page_track_register_notifier(kvm, node);
+
+	return 0;
 }
 
 void kvm_mmu_uninit_vm(struct kvm *kvm)
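
(The three INIT_LIST_HEAD() calls added here are not new state: they are moved out of kvm_arch_init_vm() in x86.c, per the commit message's grouping of all MMU initialization at the beginning; see the last hunk below.)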

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c

@@ -14,21 +14,24 @@ static bool __read_mostly tdp_mmu_enabled = true;
 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
 
 /* Initializes the TDP MMU for the VM, if enabled. */
-bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
 {
+	struct workqueue_struct *wq;
+
 	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
-		return false;
+		return 0;
+
+	wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
+	if (!wq)
+		return -ENOMEM;
 
 	/* This should not be changed for the lifetime of the VM. */
 	kvm->arch.tdp_mmu_enabled = true;
 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
-	kvm->arch.tdp_mmu_zap_wq =
-		alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
-
-	return true;
+	kvm->arch.tdp_mmu_zap_wq = wq;
+
+	return 1;
 }
 
 /* Arbitrarily returns true so that this may be used in if statements. */
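
(A note on the convention above: kvm_mmu_init_tdp_mmu() now returns a negative errno if the workqueue cannot be allocated, 0 if the TDP MMU is left disabled, and 1 if it was enabled, which is why the caller in kvm_mmu_init_vm() checks r < 0 rather than r != 0. Allocating into the local wq before touching kvm->arch also means the function never publishes partially-initialized state, so there is nothing to unwind on its error path.)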

diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h

@@ -72,7 +72,7 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
 			u64 *spte);
 
 #ifdef CONFIG_X86_64
-bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
 
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
@@ -93,7 +93,7 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
 	return sp && is_tdp_mmu_page(sp) && sp->root_count;
 }
 #else
-static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
+static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; }
 static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
 static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c

@@ -11629,12 +11629,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	ret = kvm_page_track_init(kvm);
 	if (ret)
-		return ret;
+		goto out;
+
+	ret = kvm_mmu_init_vm(kvm);
+	if (ret)
+		goto out_page_track;
 
 	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
-	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
-	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
-	INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 	atomic_set(&kvm->arch.noncoherent_dma_count, 0);
@@ -11666,10 +11667,14 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm_apicv_init(kvm);
 	kvm_hv_init_vm(kvm);
-	kvm_mmu_init_vm(kvm);
 	kvm_xen_init_vm(kvm);
 
 	return static_call(kvm_x86_vm_init)(kvm);
+
+out_page_track:
+	kvm_page_track_cleanup(kvm);
+out:
+	return ret;
 }
 
 int kvm_arch_post_init_vm(struct kvm *kvm)