KVM: VMX: Fix host msr corruption with preemption enabled

Switching msrs can occur either synchronously as a result of calls to
the msr management functions (usually in response to the guest touching
virtualized msrs), or asynchronously when preempting a kvm thread that has
guest state loaded.  If we're unlucky enough to have the two at the same
time, host msrs are corrupted and the machine goes kaput on the next syscall.

Most easily triggered by Windows Server 2008, as it does a lot of msr
switching during bootup.

Signed-off-by: Avi Kivity <avi@qumranet.com>
This commit is contained in:
Avi Kivity 2008-06-24 11:48:49 +03:00
parent 4fa6b9c5dc
commit a9b21b6229

View File

@@ -566,7 +566,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
load_transition_efer(vmx); load_transition_efer(vmx);
} }
static void vmx_load_host_state(struct vcpu_vmx *vmx) static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{ {
unsigned long flags; unsigned long flags;
@@ -596,6 +596,13 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
reload_host_efer(vmx); reload_host_efer(vmx);
} }
/*
 * Reload host MSR state with preemption disabled.
 *
 * Host-state reloads can happen synchronously (MSR management paths such
 * as vmx_set_msr()) or asynchronously when a preempted kvm thread with
 * guest state loaded is switched out.  Per the commit message above, if
 * the two interleave the host MSRs are corrupted; disabling preemption
 * around __vmx_load_host_state() makes the reload atomic with respect to
 * the preemption path.  Callers that already run with preemption off
 * (e.g. vmx_vcpu_put()) call __vmx_load_host_state() directly instead.
 */
static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
preempt_disable();
__vmx_load_host_state(vmx);
preempt_enable();
}
/* /*
* Switches to specified vcpu, until a matching vcpu_put(), but assumes * Switches to specified vcpu, until a matching vcpu_put(), but assumes
* vcpu mutex is already taken. * vcpu mutex is already taken.
@@ -654,7 +661,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
static void vmx_vcpu_put(struct kvm_vcpu *vcpu) static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{ {
vmx_load_host_state(to_vmx(vcpu)); __vmx_load_host_state(to_vmx(vcpu));
} }
static void vmx_fpu_activate(struct kvm_vcpu *vcpu) static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
@@ -884,11 +891,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
switch (msr_index) { switch (msr_index) {
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
case MSR_EFER: case MSR_EFER:
vmx_load_host_state(vmx);
ret = kvm_set_msr_common(vcpu, msr_index, data); ret = kvm_set_msr_common(vcpu, msr_index, data);
if (vmx->host_state.loaded) {
reload_host_efer(vmx);
load_transition_efer(vmx);
}
break; break;
case MSR_FS_BASE: case MSR_FS_BASE:
vmcs_writel(GUEST_FS_BASE, data); vmcs_writel(GUEST_FS_BASE, data);
@@ -910,11 +914,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
guest_write_tsc(data); guest_write_tsc(data);
break; break;
default: default:
vmx_load_host_state(vmx);
msr = find_msr_entry(vmx, msr_index); msr = find_msr_entry(vmx, msr_index);
if (msr) { if (msr) {
msr->data = data; msr->data = data;
if (vmx->host_state.loaded)
load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
break; break;
} }
ret = kvm_set_msr_common(vcpu, msr_index, data); ret = kvm_set_msr_common(vcpu, msr_index, data);