KVM: x86: Move SVM's APICv sanity check to common x86
Move SVM's assertion that a vCPU's APICv state is consistent with its VM's
state out of svm_vcpu_run() and into x86's common inner run loop.  The
assertion and underlying logic are not unique to SVM, it's just that SVM
has more inhibiting conditions and thus is more likely to run headfirst
into any KVM bugs.

Add relevant comments to document exactly why the update path has unusual
ordering between the update and the kick, why said ordering is safe, and
also the basic rules behind the assertion in the run loop.

Cc: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211022004927.1448382-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit ee49a89329
parent 9b4eb77099
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3864,8 +3864,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	pre_svm_run(vcpu);
 
-	WARN_ON_ONCE(kvm_apicv_activated(vcpu->kvm) != kvm_vcpu_apicv_active(vcpu));
-
 	sync_lapic_to_cr8(vcpu);
 
 	if (unlikely(svm->asid != svm->vmcb->control.asid)) {
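For context, the WARN being removed here compares the per-VM and per-vCPU views of APICv. A simplified sketch of the two helpers, reconstructed from the KVM code of this era for illustration (not part of this diff):

/* Per-VM view: APICv is activated iff no inhibit reason is set. */
bool kvm_apicv_activated(struct kvm *kvm)
{
	return !READ_ONCE(kvm->arch.apicv_inhibit_reasons);
}

/* Per-vCPU view: refreshed when the vCPU services KVM_REQ_APICV_UPDATE. */
static inline bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic && vcpu->arch.apicv_active;
}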
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9481,6 +9481,18 @@ void __kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
 
 	if (!!old != !!new) {
 		trace_kvm_apicv_update_request(activate, bit);
+		/*
+		 * Kick all vCPUs before setting apicv_inhibit_reasons to avoid
+		 * false positives in the sanity check WARN in svm_vcpu_run().
+		 * This task will wait for all vCPUs to ack the kick IRQ before
+		 * updating apicv_inhibit_reasons, and all other vCPUs will
+		 * block on acquiring apicv_update_lock so that vCPUs can't
+		 * redo svm_vcpu_run() without seeing the new inhibit state.
+		 *
+		 * Note, holding apicv_update_lock and taking it in the read
+		 * side (handling the request) also prevents other vCPUs from
+		 * servicing the request with a stale apicv_inhibit_reasons.
+		 */
 		kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
 		kvm->arch.apicv_inhibit_reasons = new;
 		if (new) {
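The ordering described in the new comment relies on the caller holding apicv_update_lock for write across both the kick and the update. A simplified sketch of the write-side wrapper, reconstructed from the surrounding code of this era (not part of this diff):

/*
 * Write side: holding the rw_semaphore for write makes the kick + update
 * in __kvm_request_apicv_update() appear atomic to vCPUs that take
 * apicv_update_lock on the read side.
 */
void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
{
	down_write(&kvm->arch.apicv_update_lock);
	__kvm_request_apicv_update(kvm, activate, bit);
	up_write(&kvm->arch.apicv_update_lock);
}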
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9815,6 +9827,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	}
 
 	for (;;) {
+		/*
+		 * Assert that vCPU vs. VM APICv state is consistent.  An APICv
+		 * update must kick and wait for all vCPUs before toggling the
+		 * per-VM state, and responding vCPUs must wait for the update
+		 * to complete before servicing KVM_REQ_APICV_UPDATE.
+		 */
+		WARN_ON_ONCE(kvm_apicv_activated(vcpu->kvm) != kvm_vcpu_apicv_active(vcpu));
+
 		exit_fastpath = static_call(kvm_x86_run)(vcpu);
 		if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
 			break;
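On the read side, vCPUs service KVM_REQ_APICV_UPDATE through kvm_vcpu_update_apicv(), which takes apicv_update_lock for read so a vCPU cannot observe a half-finished update. A condensed sketch of that handler, reconstructed from the KVM code of this era with details elided (not part of this diff):

void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
{
	bool activate;

	if (!lapic_in_kernel(vcpu))
		return;

	/*
	 * Read side: waits out any in-flight __kvm_request_apicv_update(),
	 * guaranteeing the final apicv_inhibit_reasons value is observed and
	 * the assertion in vcpu_enter_guest() holds on the next iteration.
	 */
	down_read(&vcpu->kvm->arch.apicv_update_lock);

	activate = kvm_apicv_activated(vcpu->kvm);
	if (vcpu->arch.apicv_active != activate) {
		vcpu->arch.apicv_active = activate;
		kvm_apic_update_apicv(vcpu);
		static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu);
	}

	up_read(&vcpu->kvm->arch.apicv_update_lock);
}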