KVM: x86/mmu: Don't rebuild page when the page is synced and no tlb flushing is required
Before commit c3e5e415bc1e6 ("KVM: X86: Change kvm_sync_page() to return true when remote flush is needed"), the return value of kvm_sync_page() indicated whether the page was synced, and kvm_mmu_get_page() would rebuild the page when the sync failed. But now kvm_sync_page() returns false both when the sync fails and when the page is synced with no TLB flush required, so kvm_mmu_get_page() rebuilds pages that are already synced. So return the return value of mmu->sync_page() directly and check it in kvm_mmu_get_page(). If the sync fails, the page is zapped and the invalid_list is not empty, so setting flush to true is acceptable in mmu_sync_children().

Cc: stable@vger.kernel.org
Fixes: c3e5e415bc1e6 ("KVM: X86: Change kvm_sync_page() to return true when remote flush is needed")
Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
Acked-by: Lai Jiangshan <jiangshanlai@gmail.com>
Message-Id: <0dabeeb789f57b0d793f85d073893063e692032d.1647336064.git.houwenlong.hwl@antgroup.com>
[mmu_sync_children should not flush if the page is zapped. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 945024d764
commit 8d5678a766
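To make the new contract concrete, below is a minimal user-space sketch, not kernel code: sync_page() is a hypothetical stub standing in for the real vcpu->arch.mmu->sync_page() hook, and the printf() calls stand in for zapping, rebuilding, and flushing. It shows the three-way return value that kvm_sync_page() now propagates and how its callers interpret it.

/*
 * Sketch of the return-value contract after this patch:
 *   < 0  sync failed, the page was zapped
 *   == 0 page already synced, no TLB flush needed
 *   > 0  page synced, a remote TLB flush is required
 */
#include <stdio.h>

/* Hypothetical stub for vcpu->arch.mmu->sync_page(vcpu, sp). */
static int sync_page(int scenario)
{
	return scenario;
}

static int kvm_sync_page_sketch(int scenario)
{
	int ret = sync_page(scenario);

	if (ret < 0)
		printf("  zap page\n");	/* kvm_mmu_prepare_zap_page() */
	return ret;	/* propagated as-is, unlike the old bool return */
}

int main(void)
{
	/* Caller pattern from kvm_mmu_get_page() after the patch. */
	for (int s = -1; s <= 1; s++) {
		int ret = kvm_sync_page_sketch(s);

		printf("ret=%d:\n", ret);
		if (ret < 0) {
			printf("  break and rebuild the page\n");
			continue;
		}
		if (ret > 0)
			printf("  kvm_flush_remote_tlbs()\n");
		else
			printf("  already synced, reuse without flushing\n");
	}
	return 0;
}

With the old bool return, ret == 0 (synced, no flush needed) and ret < 0 (sync failed, page zapped) both collapsed to false, so kvm_mmu_get_page() could not tell a healthy synced page from a zapped one; that is the needless rebuild this patch avoids.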
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1866,17 +1866,14 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
 		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
 
-static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			 struct list_head *invalid_list)
 {
 	int ret = vcpu->arch.mmu->sync_page(vcpu, sp);
 
-	if (ret < 0) {
+	if (ret < 0)
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
-		return false;
-	}
-
-	return !!ret;
+	return ret;
 }
 
 static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
@@ -1998,7 +1995,7 @@ static int mmu_sync_children(struct kvm_vcpu *vcpu,
 
 		for_each_sp(pages, sp, parents, i) {
 			kvm_unlink_unsync_page(vcpu->kvm, sp);
-			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
+			flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
 			mmu_pages_clear_parents(&parents);
 		}
 		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
@@ -2039,6 +2036,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	struct hlist_head *sp_list;
 	unsigned quadrant;
 	struct kvm_mmu_page *sp;
+	int ret;
 	int collisions = 0;
 	LIST_HEAD(invalid_list);
 
@@ -2091,11 +2089,13 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 			 * If the sync fails, the page is zapped.  If so, break
 			 * in order to rebuild it.
 			 */
-			if (!kvm_sync_page(vcpu, sp, &invalid_list))
+			ret = kvm_sync_page(vcpu, sp, &invalid_list);
+			if (ret < 0)
 				break;
 
 			WARN_ON(!list_empty(&invalid_list));
-			kvm_flush_remote_tlbs(vcpu->kvm);
+			if (ret > 0)
+				kvm_flush_remote_tlbs(vcpu->kvm);
 		}
 
 		__clear_sp_write_flooding_count(sp);