KVM: x86: do not set st->preempted when going back to user space
Similar to the Xen path, only change the vCPU's reported state if the vCPU was actually preempted. The reason for KVM's current behavior is that, for example, optimistic spinning might not be a good idea if the guest is doing repeated exits to userspace; however, it is confusing and unlikely to make a difference, because well-tuned guests will hardly ever exit KVM_RUN in the first place.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
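For context, here is a sketch of how kvm_arch_vcpu_put() reads once both hunks below are applied. The srcu comment and the lines between the two hunks are not shown in this diff and are reconstructed from the surrounding kernel source:

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	int idx;

	if (vcpu->preempted) {
		if (!vcpu->arch.guest_state_protected)
			vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);

		/*
		 * Take the srcu lock as memslots will be accessed to check the
		 * gfn cache generation against the memslots generation.
		 */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		if (kvm_xen_msr_enabled(vcpu->kvm))
			kvm_xen_runstate_set_preempted(vcpu);
		else
			kvm_steal_time_set_preempted(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
	}

	static_call(kvm_x86_vcpu_put)(vcpu);
	vcpu->arch.last_host_tsc = rdtsc();
}

The key point is that the steal-time/runstate update now happens only under the vcpu->preempted check, i.e. on true preemption, not on an ordinary return to userspace.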
arch/x86/kvm/x86.c

@@ -4654,7 +4654,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	int idx;
 
-	if (vcpu->preempted && !vcpu->arch.guest_state_protected)
+	if (vcpu->preempted) {
+		if (!vcpu->arch.guest_state_protected)
 			vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
 
 		/*
@@ -4667,6 +4668,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 		else
 			kvm_steal_time_set_preempted(vcpu);
 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	}
 
 	static_call(kvm_x86_vcpu_put)(vcpu);
 	vcpu->arch.last_host_tsc = rdtsc();
arch/x86/kvm/xen.h

@@ -159,7 +159,9 @@ static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
 	 * behalf of the vCPU. Only if the VMM does actually block
 	 * does it need to enter RUNSTATE_blocked.
 	 */
-	if (vcpu->preempted)
+	if (WARN_ON_ONCE(!vcpu->preempted))
+		return;
+
 	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
 }
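For reference, the Xen helper as it reads after this hunk, with the pre-existing comment abridged to the context lines shown above. The caller (kvm_arch_vcpu_put) now invokes it only when vcpu->preempted is set, and the WARN_ON_ONCE documents and enforces that invariant:

static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
{
	/*
	 * ... the VMM is still operating on behalf of the vCPU. Only if
	 * the VMM does actually block does it need to enter
	 * RUNSTATE_blocked.
	 */
	if (WARN_ON_ONCE(!vcpu->preempted))
		return;

	kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
}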