x86/KVM/VMX: Extend add_atomic_switch_msr() to allow VMENTER only MSRs
The IA32_FLUSH_CMD MSR needs only to be written on VMENTER. Extend
add_atomic_switch_msr() with an entry_only parameter to allow storing
the MSR only in the guest (ENTRY) MSR array.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 989e3992d2
parent 3190709335
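For orientation, a minimal sketch of the MSR autoload bookkeeping that add_atomic_switch_msr() manipulates, as split into separate guest/host structures earlier in this series. The declarations are approximated from the surrounding arch/x86/kvm/vmx.c of that era (and assume the kernel's u32/u64 types), not part of this diff:

/* Sketch of the VMX MSR autoload state (approximate, for orientation).
 * The guest array is loaded by the CPU on VM-entry (its length is
 * programmed via VM_ENTRY_MSR_LOAD_COUNT) and the host array on VM-exit
 * (VM_EXIT_MSR_LOAD_COUNT). An entry-only MSR gets a slot solely in the
 * guest array.
 */
struct vmx_msr_entry {
	u32 index;		/* MSR number */
	u32 reserved;
	u64 value;		/* value the CPU loads into the MSR */
};

struct vmx_msrs {
	unsigned int		nr;			/* slots in use */
	struct vmx_msr_entry	val[NR_AUTOLOAD_MSRS];	/* autoload slots */
};

struct msr_autoload {
	struct vmx_msrs		guest;	/* applied on VMENTER */
	struct vmx_msrs		host;	/* applied on VMEXIT */
};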
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2484,9 +2484,9 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
 }
 
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
-				  u64 guest_val, u64 host_val)
+				  u64 guest_val, u64 host_val, bool entry_only)
 {
-	int i, j;
+	int i, j = 0;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
 	switch (msr) {
@@ -2522,7 +2522,9 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 	}
 
 	i = find_msr(&m->guest, msr);
-	j = find_msr(&m->host, msr);
+	if (!entry_only)
+		j = find_msr(&m->host, msr);
+
 	if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
 		printk_once(KERN_WARNING "Not enough msr switch entries. "
 				"Can't add msr %x\n", msr);
@@ -2532,12 +2534,16 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 		i = m->guest.nr++;
 		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
 	}
+	m->guest.val[i].index = msr;
+	m->guest.val[i].value = guest_val;
+
+	if (entry_only)
+		return;
+
 	if (j < 0) {
 		j = m->host.nr++;
 		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 	}
-	m->guest.val[i].index = msr;
-	m->guest.val[i].value = guest_val;
 	m->host.val[j].index = msr;
 	m->host.val[j].value = host_val;
 }
@@ -2583,7 +2589,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 			guest_efer &= ~EFER_LME;
 		if (guest_efer != host_efer)
 			add_atomic_switch_msr(vmx, MSR_EFER,
-					      guest_efer, host_efer);
+					      guest_efer, host_efer, false);
 		return false;
 	} else {
 		guest_efer &= ~ignore_bits;
@@ -4037,7 +4043,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vcpu->arch.ia32_xss = data;
 		if (vcpu->arch.ia32_xss != host_xss)
 			add_atomic_switch_msr(vmx, MSR_IA32_XSS,
-				vcpu->arch.ia32_xss, host_xss);
+				vcpu->arch.ia32_xss, host_xss, false);
 		else
 			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
 		break;
@@ -10040,7 +10046,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 			clear_atomic_switch_msr(vmx, msrs[i].msr);
 		else
 			add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
-					msrs[i].host);
+					msrs[i].host, false);
 }
 
 static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
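The intended consumer of the new flag arrives later in this series: IA32_FLUSH_CMD is a write-only command MSR, so there is no host value worth restoring on VMEXIT. A hedged sketch of how such a call would look (the actual call site lands in a follow-up patch, not in this one; MSR_IA32_FLUSH_CMD and L1D_FLUSH are the asm/msr-index.h constants):

/* Illustrative use of the new parameter: request an L1D cache flush on
 * every VMENTER by loading IA32_FLUSH_CMD from the guest autoload array.
 * entry_only=true means no slot is consumed in the host (VMEXIT) array,
 * and the host_val argument (0 here) is never written anywhere.
 */
add_atomic_switch_msr(vmx, MSR_IA32_FLUSH_CMD, L1D_FLUSH, 0, true);

Note the design detail that makes this safe: j is now initialized to 0, so when the host-side find_msr() lookup is skipped, the "j == NR_AUTOLOAD_MSRS" overflow check still reads a defined value and the function returns right after filling the guest slot.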