KVM: x86: use guest_exit_irqoff

This gains a few clock cycles per vmexit.  On Intel there is no longer
any need to enable interrupts in vmx_handle_external_intr, since we are
using the "acknowledge interrupt on exit" feature.  AMD still needs to
enable interrupts, and must be careful to avoid the interrupt shadow.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author: Paolo Bonzini
Date:   2016-06-15 15:23:11 +02:00
parent 91fa0f8e9e
commit f2485b3e0c
3 changed files with 9 additions and 12 deletions
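
For orientation, here is a minimal user-space sketch of the exit-path ordering that the hunks below establish in vcpu_enter_guest().  The stub functions and the file name (exit_order.c) are illustrative only, not kernel code; the real implementations live in the patched files below.

/* exit_order.c -- build with: cc -o exit_order exit_order.c */
#include <stdio.h>

static void handle_external_intr(void)
{
        /*
         * VMX: with "acknowledge interrupt on exit" the host interrupt is
         * dispatched manually, so IRQs stay disabled on return (the
         * "orl $0x200" that forced IF on is removed by this patch).
         * SVM: enable IRQs, run one instruction (a nop) so the interrupt
         * shadow cannot delay the timer interrupt, then disable IRQs again.
         */
        puts("handle_external_intr()  /* returns with IRQs off */");
}

static void guest_exit_irqoff(void) { puts("guest_exit_irqoff()"); }
static void local_irq_enable(void)  { puts("local_irq_enable()"); }
static void preempt_enable(void)    { puts("preempt_enable()"); }

int main(void)
{
        /* Ordering after this patch: exit accounting runs before IRQs are
         * re-enabled, so the barrier()/stat.exits trick is no longer needed. */
        handle_external_intr();
        guest_exit_irqoff();
        local_irq_enable();
        preempt_enable();
        return 0;
}

In short, the interrupt-shadow workaround moves out of vcpu_enter_guest() and into svm_handle_external_intr(), while Intel avoids it entirely thanks to acknowledge-interrupt-on-exit.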

arch/x86/kvm/svm.c

@@ -4935,6 +4935,12 @@ out:
 static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
 {
         local_irq_enable();
+        /*
+         * We must have an instruction with interrupts enabled, so
+         * the timer interrupt isn't delayed by the interrupt shadow.
+         */
+        asm("nop");
+        local_irq_disable();
 }
 
 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)

arch/x86/kvm/vmx.c

@@ -8574,7 +8574,6 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
                         "push %[sp]\n\t"
 #endif
                         "pushf\n\t"
-                        "orl $0x200, (%%" _ASM_SP ")\n\t"
                         __ASM_SIZE(push) " $%c[cs]\n\t"
                         "call *%[entry]\n\t"
                         :
@@ -8587,8 +8586,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
                         [ss]"i"(__KERNEL_DS),
                         [cs]"i"(__KERNEL_CS)
                         );
-        } else
-                local_irq_enable();
+        }
 }
 
 static bool vmx_has_high_real_mode_segbase(void)

arch/x86/kvm/x86.c

@@ -6709,16 +6709,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
         ++vcpu->stat.exits;
 
-        /*
-         * We must have an instruction between local_irq_enable() and
-         * kvm_guest_exit(), so the timer interrupt isn't delayed by
-         * the interrupt shadow. The stat.exits increment will do nicely.
-         * But we need to prevent reordering, hence this barrier():
-         */
-        barrier();
-
-        guest_exit();
+        guest_exit_irqoff();
 
+        local_irq_enable();
         preempt_enable();
 
         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);