KVM: cleanup allocation of rmaps and page tracking data
Unify the flags for rmaps and page tracking data, using a single flag in
struct kvm_arch and a single loop to go over all the address spaces and
memslots. This avoids code duplication between alloc_all_memslots_rmaps
and kvm_page_track_enable_mmu_write_tracking.

Signed-off-by: David Stevens <stevensd@chromium.org>
[This patch is the delta between David's v2 and v3, with conflicts fixed
 and my own commit message. - Paolo]
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 3f9808cac0
commit 1e76a3ce0d
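The patch below collapses the two lazily-set flags (memslots_have_rmaps and
memslots_mmu_write_tracking) into a single shadow_root_allocated flag, and
mmu_first_shadow_root_alloc() becomes the one place that walks every address
space and memslot to allocate both the rmaps and the gfn_track arrays before
publishing the flag. The sketch that follows is a minimal user-space analogue
of that publication idiom, not kernel code: a pthread mutex stands in for
slots_arch_lock, C11 acquire/release atomics stand in for smp_load_acquire()
and smp_store_release(), and the slot array plus the names metadata_ready()
and first_shadow_root_alloc() are illustrative only.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the per-memslot metadata that KVM allocates lazily. */
struct slot {
        unsigned short *gfn_track;
        unsigned long npages;
};

static struct slot slots[4] = {
        { NULL, 64 }, { NULL, 64 }, { NULL, 64 }, { NULL, 64 }
};
static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ slots_arch_lock */
static atomic_bool metadata_allocated;                         /* ~ shadow_root_allocated */

/* Lock-free readers only need an acquire load; it pairs with the release
 * store in first_shadow_root_alloc() below. */
static bool metadata_ready(void)
{
        return atomic_load_explicit(&metadata_allocated, memory_order_acquire);
}

static int first_shadow_root_alloc(void)
{
        int r = 0;
        size_t i;

        /* Cheap check before taking the lock. */
        if (metadata_ready())
                return 0;

        pthread_mutex_lock(&slots_lock);

        /* Recheck under the lock: another thread may have won the race. */
        if (metadata_ready())
                goto out_unlock;

        for (i = 0; i < sizeof(slots) / sizeof(slots[0]); i++) {
                if (slots[i].gfn_track)         /* no-op if already allocated */
                        continue;
                slots[i].gfn_track = calloc(slots[i].npages,
                                            sizeof(*slots[i].gfn_track));
                if (!slots[i].gfn_track) {
                        r = -1;                 /* keep earlier allocations, as the patch does */
                        goto out_unlock;
                }
        }

        /* Publish: the flag becomes true strictly after all pointers are set. */
        atomic_store_explicit(&metadata_allocated, true, memory_order_release);

out_unlock:
        pthread_mutex_unlock(&slots_lock);
        return r;
}

int main(void)
{
        if (first_shadow_root_alloc() == 0 && metadata_ready())
                printf("per-slot metadata allocated and published\n");
        return 0;
}

The ordering matters because readers such as kvm_memslots_have_rmaps() and
kvm_page_track_write_tracking_enabled() never take the lock; they rely solely
on the acquire load, so the release store must happen strictly after every
per-slot pointer has been populated.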
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1212,18 +1212,11 @@ struct kvm_arch {
 #endif /* CONFIG_X86_64 */
 
         /*
-         * If set, rmaps have been allocated for all memslots and should be
-         * allocated for any newly created or modified memslots.
+         * If set, at least one shadow root has been allocated. This flag
+         * is used as one input when determining whether certain memslot
+         * related allocations are necessary.
         */
-        bool memslots_have_rmaps;
-
-        /*
-         * Set when the KVM mmu needs guest write access page tracking. If
-         * set, the necessary gfn_track arrays have been allocated for
-         * all memslots and should be allocated for any newly created or
-         * modified memslots.
-         */
-        bool memslots_mmu_write_tracking;
+        bool shadow_root_allocated;
 
 #if IS_ENABLED(CONFIG_HYPERV)
         hpa_t hv_root_tdp;
@@ -1946,7 +1939,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 
 int kvm_cpu_dirty_log_size(void);
 
-int alloc_all_memslots_rmaps(struct kvm *kvm);
+int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
 
 #define KVM_CLOCK_VALID_FLAGS \
         (KVM_CLOCK_TSC_STABLE | KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC)

--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -49,7 +49,8 @@ struct kvm_page_track_notifier_node {
 int kvm_page_track_init(struct kvm *kvm);
 void kvm_page_track_cleanup(struct kvm *kvm);
 
-int kvm_page_track_enable_mmu_write_tracking(struct kvm *kvm);
+bool kvm_page_track_write_tracking_enabled(struct kvm *kvm);
+int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot);
 
 void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);
 int kvm_page_track_create_memslot(struct kvm *kvm,

--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -304,14 +304,26 @@ int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
 int kvm_mmu_post_init_vm(struct kvm *kvm);
 void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
 
-static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
+static inline bool kvm_shadow_root_allocated(struct kvm *kvm)
 {
         /*
-         * Read memslot_have_rmaps before rmap pointers. Hence, threads reading
-         * memslots_have_rmaps in any lock context are guaranteed to see the
-         * pointers. Pairs with smp_store_release in alloc_all_memslots_rmaps.
+         * Read shadow_root_allocated before related pointers. Hence, threads
+         * reading shadow_root_allocated in any lock context are guaranteed to
+         * see the pointers. Pairs with smp_store_release in
+         * mmu_first_shadow_root_alloc.
         */
-        return smp_load_acquire(&kvm->arch.memslots_have_rmaps);
+        return smp_load_acquire(&kvm->arch.shadow_root_allocated);
+}
+
+#ifdef CONFIG_X86_64
+static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
+#else
+static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
+#endif
+
+static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
+{
+        return !is_tdp_mmu_enabled(kvm) || kvm_shadow_root_allocated(kvm);
 }
 
 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)

--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3397,6 +3397,67 @@ out_unlock:
         return r;
 }
 
+static int mmu_first_shadow_root_alloc(struct kvm *kvm)
+{
+        struct kvm_memslots *slots;
+        struct kvm_memory_slot *slot;
+        int r = 0, i;
+
+        /*
+         * Check if this is the first shadow root being allocated before
+         * taking the lock.
+         */
+        if (kvm_shadow_root_allocated(kvm))
+                return 0;
+
+        mutex_lock(&kvm->slots_arch_lock);
+
+        /* Recheck, under the lock, whether this is the first shadow root. */
+        if (kvm_shadow_root_allocated(kvm))
+                goto out_unlock;
+
+        /*
+         * Check if anything actually needs to be allocated, e.g. all metadata
+         * will be allocated upfront if TDP is disabled.
+         */
+        if (kvm_memslots_have_rmaps(kvm) &&
+            kvm_page_track_write_tracking_enabled(kvm))
+                goto out_success;
+
+        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+                slots = __kvm_memslots(kvm, i);
+                kvm_for_each_memslot(slot, slots) {
+                        /*
+                         * Both of these functions are no-ops if the target is
+                         * already allocated, so unconditionally calling both
+                         * is safe. Intentionally do NOT free allocations on
+                         * failure to avoid having to track which allocations
+                         * were made now versus when the memslot was created.
+                         * The metadata is guaranteed to be freed when the slot
+                         * is freed, and will be kept/used if userspace retries
+                         * KVM_RUN instead of killing the VM.
+                         */
+                        r = memslot_rmap_alloc(slot, slot->npages);
+                        if (r)
+                                goto out_unlock;
+                        r = kvm_page_track_write_tracking_alloc(slot);
+                        if (r)
+                                goto out_unlock;
+                }
+        }
+
+        /*
+         * Ensure that shadow_root_allocated becomes true strictly after
+         * all the related pointers are set.
+         */
+out_success:
+        smp_store_release(&kvm->arch.shadow_root_allocated, true);
+
+out_unlock:
+        mutex_unlock(&kvm->slots_arch_lock);
+        return r;
+}
+
 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 {
         struct kvm_mmu *mmu = vcpu->arch.mmu;
@@ -3427,11 +3488,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                 }
         }
 
-        r = alloc_all_memslots_rmaps(vcpu->kvm);
-        if (r)
-                return r;
-
-        r = kvm_page_track_enable_mmu_write_tracking(vcpu->kvm);
+        r = mmu_first_shadow_root_alloc(vcpu->kvm);
         if (r)
                 return r;
 
@@ -5604,16 +5661,7 @@ void kvm_mmu_init_vm(struct kvm *kvm)
 
         spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
 
-        if (!kvm_mmu_init_tdp_mmu(kvm))
-                /*
-                 * No smp_load/store wrappers needed here as we are in
-                 * VM init and there cannot be any memslots / other threads
-                 * accessing this struct kvm yet.
-                 */
-                kvm->arch.memslots_have_rmaps = true;
-
-        if (!tdp_enabled)
-                kvm->arch.memslots_mmu_write_tracking = true;
+        kvm_mmu_init_tdp_mmu(kvm);
 
         node->track_write = kvm_mmu_pte_write;
         node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;

--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -19,14 +19,10 @@
 #include "mmu.h"
 #include "mmu_internal.h"
 
-static bool write_tracking_enabled(struct kvm *kvm)
+bool kvm_page_track_write_tracking_enabled(struct kvm *kvm)
 {
-        /*
-         * Read memslots_mmu_write_tracking before gfn_track pointers. Pairs
-         * with smp_store_release in kvm_page_track_enable_mmu_write_tracking.
-         */
         return IS_ENABLED(CONFIG_KVM_EXTERNAL_WRITE_TRACKING) ||
-               smp_load_acquire(&kvm->arch.memslots_mmu_write_tracking);
+               !tdp_enabled || kvm_shadow_root_allocated(kvm);
 }
 
 void kvm_page_track_free_memslot(struct kvm_memory_slot *slot)
@@ -46,7 +42,8 @@ int kvm_page_track_create_memslot(struct kvm *kvm,
         int i;
 
         for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
-                if (i == KVM_PAGE_TRACK_WRITE && !write_tracking_enabled(kvm))
+                if (i == KVM_PAGE_TRACK_WRITE &&
+                    !kvm_page_track_write_tracking_enabled(kvm))
                         continue;
 
                 slot->arch.gfn_track[i] =
@@ -71,43 +68,18 @@ static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode)
         return true;
 }
 
-int kvm_page_track_enable_mmu_write_tracking(struct kvm *kvm)
+int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot)
 {
-        struct kvm_memslots *slots;
-        struct kvm_memory_slot *slot;
-        unsigned short **gfn_track;
-        int i;
+        unsigned short *gfn_track;
 
-        if (write_tracking_enabled(kvm))
+        if (slot->arch.gfn_track[KVM_PAGE_TRACK_WRITE])
                 return 0;
 
-        mutex_lock(&kvm->slots_arch_lock);
-
-        if (write_tracking_enabled(kvm)) {
-                mutex_unlock(&kvm->slots_arch_lock);
-                return 0;
-        }
-
-        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-                slots = __kvm_memslots(kvm, i);
-                kvm_for_each_memslot(slot, slots) {
-                        gfn_track = slot->arch.gfn_track + KVM_PAGE_TRACK_WRITE;
-                        *gfn_track = kvcalloc(slot->npages, sizeof(*gfn_track),
-                                              GFP_KERNEL_ACCOUNT);
-                        if (*gfn_track == NULL) {
-                                mutex_unlock(&kvm->slots_arch_lock);
-                                return -ENOMEM;
-                        }
-                }
-        }
-
-        /*
-         * Ensure that memslots_mmu_write_tracking becomes true strictly
-         * after all the pointers are set.
-         */
-        smp_store_release(&kvm->arch.memslots_mmu_write_tracking, true);
-        mutex_unlock(&kvm->slots_arch_lock);
+        gfn_track = kvcalloc(slot->npages, sizeof(*gfn_track), GFP_KERNEL_ACCOUNT);
+        if (gfn_track == NULL)
+                return -ENOMEM;
 
+        slot->arch.gfn_track[KVM_PAGE_TRACK_WRITE] = gfn_track;
         return 0;
 }
 
@@ -147,7 +119,7 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
                 return;
 
         if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE &&
-                    !write_tracking_enabled(kvm)))
+                    !kvm_page_track_write_tracking_enabled(kvm)))
                 return;
 
         update_gfn_track(slot, gfn, mode, 1);
@@ -185,7 +157,7 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
                 return;
 
         if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE &&
-                    !write_tracking_enabled(kvm)))
+                    !kvm_page_track_write_tracking_enabled(kvm)))
                 return;
 
         update_gfn_track(slot, gfn, mode, -1);
@@ -213,7 +185,8 @@ bool kvm_slot_page_track_is_active(struct kvm_vcpu *vcpu,
         if (!slot)
                 return false;
 
-        if (mode == KVM_PAGE_TRACK_WRITE && !write_tracking_enabled(vcpu->kvm))
+        if (mode == KVM_PAGE_TRACK_WRITE &&
+            !kvm_page_track_write_tracking_enabled(vcpu->kvm))
                 return false;
 
         index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);

--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -90,7 +90,6 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
 #ifdef CONFIG_X86_64
 bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
-static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
 
 static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
@@ -112,7 +111,6 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
 #else
 static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
 static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
-static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
 static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
 #endif

--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11514,8 +11514,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
         kvm_page_track_free_memslot(slot);
 }
 
-static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
-                              unsigned long npages)
+int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages)
 {
         const int sz = sizeof(*slot->arch.rmap[0]);
         int i;
@@ -11537,50 +11536,6 @@ static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
         return 0;
 }
 
-int alloc_all_memslots_rmaps(struct kvm *kvm)
-{
-        struct kvm_memslots *slots;
-        struct kvm_memory_slot *slot;
-        int r, i;
-
-        /*
-         * Check if memslots alreday have rmaps early before acquiring
-         * the slots_arch_lock below.
-         */
-        if (kvm_memslots_have_rmaps(kvm))
-                return 0;
-
-        mutex_lock(&kvm->slots_arch_lock);
-
-        /*
-         * Read memslots_have_rmaps again, under the slots arch lock,
-         * before allocating the rmaps
-         */
-        if (kvm_memslots_have_rmaps(kvm)) {
-                mutex_unlock(&kvm->slots_arch_lock);
-                return 0;
-        }
-
-        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-                slots = __kvm_memslots(kvm, i);
-                kvm_for_each_memslot(slot, slots) {
-                        r = memslot_rmap_alloc(slot, slot->npages);
-                        if (r) {
-                                mutex_unlock(&kvm->slots_arch_lock);
-                                return r;
-                        }
-                }
-        }
-
-        /*
-         * Ensure that memslots_have_rmaps becomes true strictly after
-         * all the rmap pointers are set.
-         */
-        smp_store_release(&kvm->arch.memslots_have_rmaps, true);
-        mutex_unlock(&kvm->slots_arch_lock);
-        return 0;
-}
-
 static int kvm_alloc_memslot_metadata(struct kvm *kvm,
                                       struct kvm_memory_slot *slot,
                                       unsigned long npages)