KVM fixes for v4.4-rc7
- A series of fixes to the MTRR emulation, tested in the BZ by several
  users so they should be safe this late
- A fix for a division by zero
- Two very simple ARM and PPC fixes

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQEcBAABAgAGBQJWeV/6AAoJEL/70l94x66DqzsH/05YnLi2GsX5WeZHMfIUgzgT
S/GoIkA7A4E2eXVoGg824MWppSViUzZkWgYFQTG4+KY9WPXzm9z2ij7DIlUHCD6n
QfevgQx1kIu1obyhm6bYM2xUdM3f7NCsQgw9bXZObB0ay+b/+GjR9/RbCbx60EO5
K1P+kveK6PFlS9/hc0PLztu6WkPV9BCO1RJUbeAEdnrMbpuQfHC+coR7MHRCiv2V
iy8f1CqrGaO5YPm9/3GbdH1xMKew4OZShOxTXwtvUThdrLkks2c8sk6FoLzqkznH
LMHVIpkm4mrIgThZG7VqZMXOrWvBtsCt04Vr9MzCM6QetB02b/Uz0xKvMYx2kZQ=
=pmYz
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:

- A series of fixes to the MTRR emulation, tested in the BZ by several
  users so they should be safe this late
- A fix for a division by zero
- Two very simple ARM and PPC fixes

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: Reload pit counters for all channels when restoring state
  KVM: MTRR: treat memory as writeback if MTRR is disabled in guest CPUID
  KVM: MTRR: observe maxphyaddr from guest CPUID, not host
  KVM: MTRR: fix fixed MTRR segment look up
  KVM: VMX: Fix host initiated access to guest MSR_TSC_AUX
  KVM: arm/arm64: vgic: Fix kvm_vgic_map_is_active's dist check
  kvm: x86: move tracepoints outside extended quiescent state
  KVM: PPC: Book3S HV: Prohibit setting illegal transaction state in MSR
commit e73a31778a
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -224,6 +224,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 
 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
 {
+        /*
+         * Check for illegal transactional state bit combination
+         * and if we find it, force the TS field to a safe state.
+         */
+        if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
+                msr &= ~MSR_TS_MASK;
         vcpu->arch.shregs.msr = msr;
         kvmppc_end_cede(vcpu);
 }
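Background on this hunk: the TS (transactional state) field of the PowerPC MSR is two bits wide, and the Power ISA defines 0b00 as non-transactional, 0b01 as suspended, 0b10 as transactional, and 0b11 as reserved, so userspace could previously load an illegal combination through the set-registers ioctls. Below is a minimal user-space sketch of the same sanitizing check; the bit positions are assumptions for illustration, not copies of the kernel's MSR_TS_* macros from arch/powerpc/include/asm/reg.h.

/* Minimal sketch of the TS sanitizing logic above.  Bit positions
 * are assumed for illustration only. */
#include <stdint.h>
#include <stdio.h>

#define MSR_TS_S    (1ULL << 33)           /* assumed: suspended      */
#define MSR_TS_T    (1ULL << 34)           /* assumed: transactional  */
#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S)  /* TS = 0b11 is reserved   */

static uint64_t sanitize_ts(uint64_t msr)
{
        /* force the reserved 0b11 combination back to 0b00 */
        if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
                msr &= ~MSR_TS_MASK;
        return msr;
}

int main(void)
{
        uint64_t msr = MSR_TS_MASK;     /* illegal: both TS bits set */
        printf("%#llx -> %#llx\n", (unsigned long long)msr,
               (unsigned long long)sanitize_ts(msr));
        return 0;
}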
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -38,6 +38,14 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
         return best && (best->ecx & bit(X86_FEATURE_XSAVE));
 }
 
+static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
+{
+        struct kvm_cpuid_entry2 *best;
+
+        best = kvm_find_cpuid_entry(vcpu, 1, 0);
+        return best && (best->edx & bit(X86_FEATURE_MTRR));
+}
+
 static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
 {
         struct kvm_cpuid_entry2 *best;
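The new helper follows the existing guest_cpuid_has_* pattern: look up CPUID leaf 1 and test the architectural MTRR feature flag, bit 12 of EDX. A host-side sketch of the same check, using GCC's cpuid.h rather than KVM's internal helpers:

/* Sketch: the test guest_cpuid_has_mtrr() performs, done against the
 * host CPU with GCC's __get_cpuid().  Leaf 1, EDX bit 12 = MTRR. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;
        printf("MTRR: %s\n", (edx & (1u << 12)) ? "present" : "absent");
        return 0;
}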
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -120,14 +120,22 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
         return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
 }
 
-static u8 mtrr_disabled_type(void)
+static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
 {
         /*
          * Intel SDM 11.11.2.2: all MTRRs are disabled when
          * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
          * memory type is applied to all of physical memory.
+         *
+         * However, virtual machines can be run with CPUID such that
+         * there are no MTRRs. In that case, the firmware will never
+         * enable MTRRs and it is obviously undesirable to run the
+         * guest entirely with UC memory and we use WB.
          */
-        return MTRR_TYPE_UNCACHABLE;
+        if (guest_cpuid_has_mtrr(vcpu))
+                return MTRR_TYPE_UNCACHABLE;
+        else
+                return MTRR_TYPE_WRBACK;
 }
 
 /*
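The effect is easy to state: if MTRRs exist in the guest's CPUID but are disabled, the SDM mandates UC; if they do not exist at all, KVM now falls back to WB so the guest is not stuck running entirely uncached. A tiny sketch of the resulting decision, using the architectural memory-type encodings (UC = 0, WB = 6):

/* Sketch of mtrr_disabled_type()'s decision table. */
#include <stdio.h>

enum { MTRR_TYPE_UNCACHABLE = 0, MTRR_TYPE_WRBACK = 6 };

static int disabled_type(int guest_cpuid_has_mtrr)
{
        return guest_cpuid_has_mtrr ? MTRR_TYPE_UNCACHABLE
                                    : MTRR_TYPE_WRBACK;
}

int main(void)
{
        printf("MTRR in CPUID: %d, no MTRR in CPUID: %d\n",
               disabled_type(1), disabled_type(0));
        return 0;
}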
@@ -267,7 +275,7 @@ static int fixed_mtrr_addr_to_seg(u64 addr)
 
         for (seg = 0; seg < seg_num; seg++) {
                 mtrr_seg = &fixed_seg_table[seg];
-                if (mtrr_seg->start >= addr && addr < mtrr_seg->end)
+                if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
                         return seg;
         }
 
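A one-character bug: with start >= addr the predicate can only hold when addr sits at or below a segment's start, so look-ups in the fixed ranges (which cover the first megabyte at 64 KiB, 16 KiB, and 4 KiB granularity) matched the wrong segment or none at all. A standalone sketch of the corrected walk; the table mirrors the layout of fixed_seg_table[] conceptually, not verbatim:

/* Sketch of the corrected segment walk.  Boundaries follow the Intel
 * SDM fixed-range MTRR layout; the struct is a stand-in for KVM's
 * fixed_seg_table[]. */
#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t start, end; };

static const struct seg fixed_segs[] = {
        { 0x00000, 0x80000 },   /* 512 KiB in 64 KiB units */
        { 0x80000, 0xc0000 },   /* 256 KiB in 16 KiB units */
        { 0xc0000, 0x100000 },  /* 256 KiB in  4 KiB units */
};

static int addr_to_seg(uint64_t addr)
{
        for (int seg = 0; seg < 3; seg++) {
                /* the old "start >= addr" test misfiled every address
                 * except an exact hit on a segment's start */
                if (fixed_segs[seg].start <= addr && addr < fixed_segs[seg].end)
                        return seg;
        }
        return -1;
}

int main(void)
{
        printf("0xc1234 is in segment %d\n", addr_to_seg(0xc1234)); /* 2 */
        return 0;
}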
@@ -300,7 +308,6 @@ static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
         *start = range->base & PAGE_MASK;
 
         mask = range->mask & PAGE_MASK;
-        mask |= ~0ULL << boot_cpu_data.x86_phys_bits;
 
         /* This cannot overflow because writing to the reserved bits of
          * variable MTRRs causes a #GP.
@@ -356,10 +363,14 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
         if (var_mtrr_range_is_valid(cur))
                 list_del(&mtrr_state->var_ranges[index].node);
 
+        /* Extend the mask with all 1 bits to the left, since those
+         * bits must implicitly be 0. The bits are then cleared
+         * when reading them.
+         */
         if (!is_mtrr_mask)
                 cur->base = data;
         else
-                cur->mask = data;
+                cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));
 
         /* add it to the list if it's enabled. */
         if (var_mtrr_range_is_valid(cur)) {
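The maxphyaddr hunks implement one contract: on a mask write, bits above the guest's physical-address width are forced to 1 internally (architecturally they must be written as 0, and the all-ones extension keeps range matching canonical); on a read they are cleared again. Previously the host width, boot_cpu_data.x86_phys_bits, was used, which breaks when host and guest widths differ. A worked sketch with an assumed guest MAXPHYADDR of 36 bits (it would come from guest CPUID leaf 0x80000008 EAX[7:0]):

/* Sketch of the write/read round-trip for a variable-MTRR mask.
 * The 36-bit MAXPHYADDR and the mask value are assumed examples. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int maxphyaddr = 36;
        uint64_t data = 0xff0000800ULL;  /* guest-written mask, < 2^36 */

        uint64_t stored   = data | (-1LL << maxphyaddr);         /* set_msr */
        uint64_t readback = stored & ((1ULL << maxphyaddr) - 1); /* get_msr */

        printf("stored   %#llx\n", (unsigned long long)stored);
        printf("readback %#llx\n", (unsigned long long)readback);
        return readback != data;        /* 0: round-trip is lossless */
}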
@@ -426,6 +437,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                         *pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
                 else
                         *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
+
+                *pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
         }
 
         return 0;
@@ -670,7 +683,7 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
         }
 
         if (iter.mtrr_disabled)
-                return mtrr_disabled_type();
+                return mtrr_disabled_type(vcpu);
 
         /* not contained in any MTRRs. */
         if (type == -1)
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3422,6 +3422,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
         struct kvm_run *kvm_run = vcpu->run;
         u32 exit_code = svm->vmcb->control.exit_code;
 
+        trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
+
         if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
                 vcpu->arch.cr0 = svm->vmcb->save.cr0;
         if (npt_enabled)
@@ -3892,8 +3894,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
         vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
         vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
-        trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);
-
         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
                 kvm_before_handle_nmi(&svm->vcpu);
 
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2803,7 +2803,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 msr_info->data = vcpu->arch.ia32_xss;
                 break;
         case MSR_TSC_AUX:
-                if (!guest_cpuid_has_rdtscp(vcpu))
+                if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
                         return 1;
                 /* Otherwise falls through */
         default:
@@ -2909,7 +2909,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
                 break;
         case MSR_TSC_AUX:
-                if (!guest_cpuid_has_rdtscp(vcpu))
+                if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
                         return 1;
                 /* Check reserved bit, higher 32 bits should be zero */
                 if ((data >> 32) != 0)
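Both MSR_TSC_AUX hunks apply the same rule: a guest rdmsr/wrmsr must be refused when guest CPUID lacks RDTSCP, but a host-initiated access, e.g. the get/set-MSR ioctls during live-migration save and restore, has to succeed regardless of guest CPUID. A compact sketch of the predicate:

/* Sketch of the access rule shared by vmx_get_msr()/vmx_set_msr().
 * Returning false corresponds to the kernel's "return 1" (reject). */
#include <stdbool.h>
#include <stdio.h>

static bool tsc_aux_access_ok(bool guest_has_rdtscp, bool host_initiated)
{
        return guest_has_rdtscp || host_initiated;
}

int main(void)
{
        /* guest without RDTSCP: guest access rejected, host access OK */
        printf("guest rdmsr: %d\n", tsc_aux_access_ok(false, false)); /* 0 */
        printf("host ioctl : %d\n", tsc_aux_access_ok(false, true));  /* 1 */
        return 0;
}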
@@ -8042,6 +8042,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
         u32 exit_reason = vmx->exit_reason;
         u32 vectoring_info = vmx->idt_vectoring_info;
 
+        trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
+
         /*
          * Flush logged GPAs PML buffer, this will make dirty_bitmap more
          * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
@@ -8668,7 +8670,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         vmx->loaded_vmcs->launched = 1;
 
         vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
-        trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
 
         /*
          * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3572,9 +3572,11 @@ static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
 
 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
 {
+        int i;
         mutex_lock(&kvm->arch.vpit->pit_state.lock);
         memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
-        kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
+        for (i = 0; i < 3; i++)
+                kvm_pit_load_count(kvm, i, ps->channels[i].count, 0);
         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
         return 0;
 }
@@ -3593,6 +3595,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 {
         int start = 0;
+        int i;
         u32 prev_legacy, cur_legacy;
         mutex_lock(&kvm->arch.vpit->pit_state.lock);
         prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
@@ -3602,7 +3605,8 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
         memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
                sizeof(kvm->arch.vpit->pit_state.channels));
         kvm->arch.vpit->pit_state.flags = ps->flags;
-        kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
+        for (i = 0; i < 3; i++)
+                kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, start);
         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
         return 0;
 }
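The i8254 PIT exposes three channels, and both kvm_pit_state and kvm_pit_state2 carry an array of three channel states; the old code reloaded only channels[0], leaving the other counters stale after a set-PIT ioctl. A sketch of the reload loop with stand-in types (the real UAPI structs live in include/uapi/linux/kvm.h):

/* Sketch: restoring all three PIT channels.  The structs are
 * simplified stand-ins for the kvm_pit_state2 UAPI layout. */
#include <stdio.h>

struct channel_state { unsigned int count; };
struct pit_state { struct channel_state channels[3]; };

static void pit_load_count(int channel, unsigned int count)
{
        printf("channel %d: reload count %u\n", channel, count);
}

int main(void)
{
        struct pit_state ps = { .channels = { { 65535 }, { 0 }, { 1193 } } };

        /* the fix: iterate over every channel, not just channels[0] */
        for (int i = 0; i < 3; i++)
                pit_load_count(i, ps.channels[i].count);
        return 0;
}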
@@ -6515,6 +6519,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         if (req_immediate_exit)
                 smp_send_reschedule(vcpu->cpu);
 
+        trace_kvm_entry(vcpu->vcpu_id);
+        wait_lapic_expire(vcpu);
         __kvm_guest_enter();
 
         if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -6527,8 +6533,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
         }
 
-        trace_kvm_entry(vcpu->vcpu_id);
-        wait_lapic_expire(vcpu);
         kvm_x86_ops->run(vcpu);
 
         /*
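The tracepoint moves in svm.c, vmx.c, and here all serve one constraint: tracepoints rely on RCU, and between __kvm_guest_enter() and guest exit the CPU sits in an RCU extended quiescent state where RCU-protected calls are illegal. So trace_kvm_entry()/wait_lapic_expire() move to before the quiescent state is entered, and trace_kvm_exit() moves into the exit handlers, which run after it is left. A runnable toy illustrating the required ordering; every function name here is an illustrative stub, not a kernel API:

/* Toy model of the ordering constraint; all functions are stubs. */
#include <stdio.h>

static void trace_entry(void) { puts("trace_kvm_entry"); }
static void eqs_enter(void)   { puts("-> RCU extended quiescent state"); }
static void run_guest(void)   { puts("   VM-enter ... VM-exit"); }
static void eqs_exit(void)    { puts("<- RCU watching again"); }
static void trace_exit(void)  { puts("trace_kvm_exit"); }

int main(void)
{
        trace_entry();  /* safe: RCU still watching */
        eqs_enter();
        run_guest();    /* no tracepoints allowed in here */
        eqs_exit();
        trace_exit();   /* safe again: moved into the exit handler */
        return 0;
}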
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1114,7 +1114,7 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
                 return true;
         }
 
-        return dist_active_irq(vcpu);
+        return vgic_irq_is_active(vcpu, map->virt_irq);
 }
 
 /*