KVM: x86/mmu: Use KVM_REQ_TLB_FLUSH_CURRENT for MMU specific flushes

Flush only the current ASID/context when requesting a TLB flush due to a
change in the current vCPU's MMU to avoid blasting away TLB entries
associated with other ASIDs/contexts, e.g. entries cached for L1 when
a change in L2's MMU requires a flush.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320212833.3507-26-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 8c8560b833
parent eeeb4f67a6
Author: Sean Christopherson <sean.j.christopherson@intel.com>
Date:   2020-03-20 14:28:21 -07:00
Committer: Paolo Bonzini
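
For context before the diff: kvm_make_request() only sets a bit in the vCPU's
request bitmap, and the flush itself is performed on the next guest entry,
where KVM_REQ_TLB_FLUSH is serviced via the ->tlb_flush_all() hook and
KVM_REQ_TLB_FLUSH_CURRENT via the ->tlb_flush_current() hook this series
introduces. The toy userspace model below sketches that producer/consumer
shape; every name and detail in it is an illustrative simplification, not
kernel code.

#include <stdbool.h>
#include <stdio.h>

#define REQ_TLB_FLUSH           (1u << 0)  /* flush all ASIDs/contexts */
#define REQ_TLB_FLUSH_CURRENT   (1u << 1)  /* flush the current context only */

struct toy_vcpu {
        unsigned int requests;          /* pending-request bitmap */
        int current_asid;               /* context the vCPU is running in */
};

/* Producer side: record the request; no flush happens here. */
static void make_request(struct toy_vcpu *vcpu, unsigned int req)
{
        vcpu->requests |= req;
}

/* Consume a pending request, if any. */
static bool check_request(struct toy_vcpu *vcpu, unsigned int req)
{
        if (vcpu->requests & req) {
                vcpu->requests &= ~req;
                return true;
        }
        return false;
}

/* Consumer side: runs on the entry path, before reentering the guest. */
static void service_requests(struct toy_vcpu *vcpu)
{
        if (check_request(vcpu, REQ_TLB_FLUSH))
                printf("flush ALL ASIDs/contexts\n");
        if (check_request(vcpu, REQ_TLB_FLUSH_CURRENT))
                printf("flush only ASID %d\n", vcpu->current_asid);
}

int main(void)
{
        struct toy_vcpu vcpu = { .requests = 0, .current_asid = 2 };

        /* An MMU change scoped to this vCPU's context, after this patch: */
        make_request(&vcpu, REQ_TLB_FLUSH_CURRENT);
        service_requests(&vcpu);        /* prints "flush only ASID 2" */
        return 0;
}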

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2309,7 +2309,7 @@ static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
 		return;
 
 	if (local_flush)
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 }
 
 #ifdef CONFIG_KVM_MMU_AUDIT
@@ -2516,11 +2516,11 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 				break;
 
 			WARN_ON(!list_empty(&invalid_list));
-			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 		}
 
 		if (sp->unsync_children)
-			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 
 		__clear_sp_write_flooding_count(sp);
 		trace_kvm_mmu_get_page(sp, false);
@@ -3121,7 +3121,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
 		if (write_fault)
 			ret = RET_PF_EMULATE;
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 	}
 
 	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
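
Note the split the hunk above preserves: the write-protected case requests
only a current-context flush on this vCPU, while the
SET_SPTE_NEED_REMOTE_TLB_FLUSH path on the final context line still forces a
flush on every vCPU. A toy model of that local-versus-remote distinction
follows; the names are invented for illustration and do not match the
kernel's.

#include <stdio.h>

#define NR_VCPUS                2
#define REQ_TLB_FLUSH_CURRENT   (1u << 1)

struct toy_vcpu {
        unsigned int requests;
};

static struct toy_vcpu vcpus[NR_VCPUS];

/* Local: mark only this vCPU; honored at its next guest entry. */
static void make_request(struct toy_vcpu *vcpu, unsigned int req)
{
        vcpu->requests |= req;
}

/* Remote: every vCPU is forced to flush before the change is relied on. */
static void flush_remote_tlbs(void)
{
        for (int i = 0; i < NR_VCPUS; i++)
                printf("kick vCPU %d to flush\n", i);
}

int main(void)
{
        /* Local case: only vCPU 0 is asked to flush, and only its context. */
        make_request(&vcpus[0], REQ_TLB_FLUSH_CURRENT);
        printf("vCPU 0 pending requests: %#x\n", vcpus[0].requests);

        /* Remote case: all vCPUs must flush. */
        flush_remote_tlbs();
        return 0;
}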
@@ -4310,7 +4310,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 			kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
 			if (!skip_tlb_flush) {
 				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-				kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+				kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 			}
 
 			/*
@@ -5179,7 +5179,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	if (r)
 		goto out;
 	kvm_mmu_load_pgd(vcpu);
-	kvm_x86_ops.tlb_flush_all(vcpu);
+	kvm_x86_ops.tlb_flush_current(vcpu);
 out:
 	return r;
 }
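
What makes a current-context flush cheaper is backend-specific: on VMX with
EPT, for example, ->tlb_flush_current() can use a single-context INVEPT keyed
to the current EPT pointer where ->tlb_flush_all() needs a global
invalidation (the exact VMX helpers are not shown here). The payoff is the
changelog's point: entries tagged for other contexts, e.g. L1's while running
L2, survive. A runnable toy model of tag-scoped flushing, with the TLB layout
and all names invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy tagged TLB: each cached translation carries the context (ASID/EPTP
 * tag) that owns it, as on hardware with VPID/EPT tagging. Illustrative
 * only; no relation to real TLB or KVM data structures.
 */
struct toy_tlb_entry {
        int context;    /* owning ASID/context tag */
        bool valid;
};

#define TLB_SIZE 4

static struct toy_tlb_entry tlb[TLB_SIZE] = {
        { .context = 1, .valid = true },        /* e.g. L1's entries */
        { .context = 1, .valid = true },
        { .context = 2, .valid = true },        /* e.g. L2's entries */
        { .context = 2, .valid = true },
};

/* KVM_REQ_TLB_FLUSH analogue: blast away every context. */
static void flush_all(void)
{
        for (int i = 0; i < TLB_SIZE; i++)
                tlb[i].valid = false;
}

/* KVM_REQ_TLB_FLUSH_CURRENT analogue: scope the flush to one tag. */
static void flush_current(int current_context)
{
        for (int i = 0; i < TLB_SIZE; i++)
                if (tlb[i].context == current_context)
                        tlb[i].valid = false;
}

int main(void)
{
        /* A change in L2's MMU (context 2) only needs context 2 flushed. */
        flush_current(2);
        for (int i = 0; i < TLB_SIZE; i++)
                printf("entry %d (context %d): %s\n", i, tlb[i].context,
                       tlb[i].valid ? "still cached" : "flushed");

        /* KVM_REQ_TLB_FLUSH would have wiped context 1 (L1) as well. */
        flush_all();
        return 0;
}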