KVM: x86/mmu: Yield in TDP MMU iter even if no SPTEs changed
[ Upstream commit 1af4a96025b33587ca953c7ef12a1b20c6e70412 ]

Given certain conditions, some TDP MMU functions may not yield
reliably / frequently enough. For example, if a paging structure was
very large but had few, if any, writable entries, wrprot_gfn_range
could traverse many entries before finding a writable entry and
yielding, because the check for yielding only happens after an SPTE
is modified. Fix this issue by moving the yield to the beginning of
the loop.

Fixes: a6a0b05da9f3 ("kvm: x86/mmu: Support dirty logging for the TDP MMU")
Reviewed-by: Peter Feiner <pfeiner@google.com>
Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20210202185734.1680553-15-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 85f4ff2b06
commit 3c7a184406
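The shape of the fix is the same in every function touched below: hoist the tdp_mmu_iter_cond_resched() check to the top of the walk loop so that the "nothing to do, continue" path also passes a yield point. A minimal, self-contained sketch of the before/after loop shape follows; every name in it (entries, entry_needs_change, change_entry, should_yield) is a hypothetical stand-in for the TDP MMU iterator, the SPTE predicates, and tdp_mmu_iter_cond_resched(), not the kernel API.

#include <stdbool.h>
#include <stdio.h>

#define NENTRIES 1024

/* Rarely true, mimicking a huge paging structure with few writable entries. */
static bool entry_needs_change(int e) { return (e & 1) != 0; }
static void change_entry(int *e) { *e = 0; }
/* Stand-in for the need_resched() check inside tdp_mmu_iter_cond_resched(). */
static bool should_yield(size_t i) { return (i % 64) == 63; }

int main(void)
{
	static int entries[NENTRIES];	/* all zero: no entry needs changing */
	size_t i, yields = 0;

	/*
	 * Buggy shape (before the patch): the yield check sat after the
	 * modification, so the "nothing to do" path never yielded:
	 *
	 *	if (!entry_needs_change(entries[i]))
	 *		continue;		// no yield point on this path
	 *	change_entry(&entries[i]);
	 *	maybe_yield();
	 *
	 * Fixed shape (below): the yield check runs first on every
	 * iteration, bounding the work between yield points no matter
	 * how few entries are actually modified.
	 */
	for (i = 0; i < NENTRIES; i++) {
		if (should_yield(i)) {
			yields++;	/* cond_resched() would go here; the
					 * kernel refreshes its iterator after
					 * yielding, whereas this flat sketch
					 * just moves to the next entry */
			continue;
		}
		if (!entry_needs_change(entries[i]))
			continue;
		change_entry(&entries[i]);
	}

	printf("yield points reached: %zu\n", yields);
	return 0;
}

With every entry unchanged, the buggy shape would never reach a yield point, while the fixed shape still yields 16 times over 1024 entries. One design choice visible in the hunks below: in zap_gfn_range() and zap_collapsible_spte_range() the pending-flush flag (flush_needed / spte_set) is passed into tdp_mmu_iter_cond_resched() and reset to false when the helper yields, since in this series the helper flushes remote TLBs before rescheduling when that flag is set.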
arch/x86/kvm/mmu/tdp_mmu.c
@@ -462,6 +462,12 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 	bool flush_needed = false;
 
 	tdp_root_for_each_pte(iter, root, start, end) {
+		if (can_yield &&
+		    tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
+			flush_needed = false;
+			continue;
+		}
+
 		if (!is_shadow_present_pte(iter.old_spte))
 			continue;
 
@@ -476,9 +482,7 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 			continue;
 
 		tdp_mmu_set_spte(kvm, &iter, 0);
-
-		flush_needed = !(can_yield &&
-				 tdp_mmu_iter_cond_resched(kvm, &iter, true));
+		flush_needed = true;
 	}
 	return flush_needed;
 }
@@ -838,6 +842,9 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
 				   min_level, start, end) {
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+			continue;
+
 		if (!is_shadow_present_pte(iter.old_spte) ||
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
@@ -846,8 +853,6 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
 		spte_set = true;
-
-		tdp_mmu_iter_cond_resched(kvm, &iter, false);
 	}
 	return spte_set;
 }
@@ -891,6 +896,9 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 	bool spte_set = false;
 
 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+			continue;
+
 		if (spte_ad_need_write_protect(iter.old_spte)) {
 			if (is_writable_pte(iter.old_spte))
 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
@@ -905,8 +913,6 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
 		spte_set = true;
-
-		tdp_mmu_iter_cond_resched(kvm, &iter, false);
 	}
 	return spte_set;
 }
@@ -1014,6 +1020,9 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 	bool spte_set = false;
 
 	tdp_root_for_each_pte(iter, root, start, end) {
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+			continue;
+
 		if (!is_shadow_present_pte(iter.old_spte))
 			continue;
 
@@ -1021,8 +1030,6 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 		tdp_mmu_set_spte(kvm, &iter, new_spte);
 		spte_set = true;
-
-		tdp_mmu_iter_cond_resched(kvm, &iter, false);
 	}
 
 	return spte_set;
@@ -1063,6 +1070,11 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 	bool spte_set = false;
 
 	tdp_root_for_each_pte(iter, root, start, end) {
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
+			spte_set = false;
+			continue;
+		}
+
 		if (!is_shadow_present_pte(iter.old_spte) ||
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
@@ -1075,7 +1087,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 
 		tdp_mmu_set_spte(kvm, &iter, 0);
 
-		spte_set = !tdp_mmu_iter_cond_resched(kvm, &iter, true);
+		spte_set = true;
 	}
 
 	if (spte_set)