ARM:

* Read HW interrupt pending state from the HW

x86:

* Don't truncate the performance event mask on AMD
* Fix Xen runstate updates to be atomic when preempting vCPU
* Fix for AMD AVIC interrupt injection race
* Several other AMD fixes

-----BEGIN PGP SIGNATURE-----

iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmIL4G4UHHBib256aW5p
QHJlZGhhdC5jb20ACgkQv/vSX3jHroNkQQf/Z75dnmdRl8sHHnGjwH2IhWHwAg+h
5O+mJphYt4cvVMexP5dj69b7mHtKMeg/0TxPvPfwCLlhzKkW1gQFwwBAq/YuBCKw
cnMuVPeCSWo6znpS+jYUF4FAJgPKkzfFR9UwYAR5UexSWyOwU8rLcvSxj8vJjO/l
sIke+f767Ks2KgcTMIudObg+vDcgnQXI8n8ztI7hF1WJKYHdTKFkYN7BYRxQ9BW6
4fq51218DhRMv6S7so5dhYC473f+D0t8b5S/Mygur/x6mzsdQJKeOmi8aWGoDa/B
Bmse+X0lHoOkdXaxqpBgQCYeyrXohNcXx7cpGRVFnS45Jf7MLG4OfVHWNQ==
=kD2l
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "ARM:
   - Read HW interrupt pending state from the HW

  x86:
   - Don't truncate the performance event mask on AMD
   - Fix Xen runstate updates to be atomic when preempting vCPU
   - Fix for AMD AVIC interrupt injection race
   - Several other AMD fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86/pmu: Use AMD64_RAW_EVENT_MASK for PERF_TYPE_RAW
  KVM: x86/pmu: Don't truncate the PerfEvtSeln MSR when creating a perf event
  KVM: SVM: fix race between interrupt delivery and AVIC inhibition
  KVM: SVM: set IRR in svm_deliver_interrupt
  KVM: SVM: extract avic_ring_doorbell
  selftests: kvm: Remove absent target file
  KVM: arm64: vgic: Read HW interrupt pending state from the HW
  KVM: x86/xen: Fix runstate updates to be atomic when preempting vCPU
  KVM: x86: SVM: move avic definitions from AMD's spec to svm.h
  KVM: x86: lapic: don't touch irr_pending in kvm_apic_update_apicv when inhibiting it
  KVM: x86: nSVM: deal with L1 hypervisor that intercepts interrupts but lets L2 control them
  KVM: x86: nSVM: expose clean bit support to the guest
  KVM: x86: nSVM/nVMX: set nested_run_pending on VM entry which is a result of RSM
  KVM: x86: nSVM: mark vmcb01 as dirty when restoring SMM saved state
  KVM: x86: nSVM: fix potential NULL derefernce on nested migration
  KVM: x86: SVM: don't passthrough SMAP/SMEP/PKE bits in !NPT && !gCR0.PG case
  Revert "svm: Add warning message for AVIC IPI invalid target"
commit c5d9ae265b
@@ -248,6 +248,8 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                                        IRQCHIP_STATE_PENDING,
                                        &val);
            WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
        } else if (vgic_irq_is_mapped_level(irq)) {
            val = vgic_get_phys_line_level(irq);
        } else {
            val = irq_is_pending(irq);
        }
@@ -476,6 +476,7 @@
#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
#define MSR_AMD64_SVM_AVIC_DOORBELL 0xc001011b
#define MSR_AMD64_VM_PAGE_FLUSH 0xc001011e
#define MSR_AMD64_SEV_ES_GHCB 0xc0010130
#define MSR_AMD64_SEV 0xc0010131
@@ -220,6 +220,42 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
#define SVM_NESTED_CTL_SEV_ES_ENABLE BIT(2)


/* AVIC */
#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK (0xFF)

#define AVIC_DOORBELL_PHYSICAL_ID_MASK (0xFF)

#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF

enum avic_ipi_failure_cause {
    AVIC_IPI_FAILURE_INVALID_INT_TYPE,
    AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
    AVIC_IPI_FAILURE_INVALID_TARGET,
    AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
};


/*
 * 0xff is broadcast, so the max index allowed for physical APIC ID
 * table is 0xfe. APIC IDs above 0xff are reserved.
 */
#define AVIC_MAX_PHYSICAL_ID_COUNT 0xff

#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL


struct vmcb_seg {
    u16 selector;
    u16 attrib;
@@ -2306,7 +2306,12 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
        apic->irr_pending = true;
        apic->isr_count = 1;
    } else {
        apic->irr_pending = (apic_search_irr(apic) != -1);
        /*
         * Don't clear irr_pending, searching the IRR can race with
         * updates from the CPU as APICv is still active from hardware's
         * perspective. The flag will be cleared as appropriate when
         * KVM injects the interrupt.
         */
        apic->isr_count = count_vectors(apic->regs + APIC_ISR);
    }
}
@@ -95,7 +95,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
}

static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
                                  unsigned config, bool exclude_user,
                                  u64 config, bool exclude_user,
                                  bool exclude_kernel, bool intr,
                                  bool in_tx, bool in_tx_cp)
{

@@ -181,7 +181,8 @@ static int cmp_u64(const void *a, const void *b)

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
    unsigned config, type = PERF_TYPE_RAW;
    u64 config;
    u32 type = PERF_TYPE_RAW;
    struct kvm *kvm = pmc->vcpu->kvm;
    struct kvm_pmu_event_filter *filter;
    bool allow_event = true;

@@ -220,7 +221,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
    }

    if (type == PERF_TYPE_RAW)
        config = eventsel & X86_RAW_EVENT_MASK;
        config = eventsel & AMD64_RAW_EVENT_MASK;

    if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
        return;
@@ -27,20 +27,6 @@
#include "irq.h"
#include "svm.h"

#define SVM_AVIC_DOORBELL 0xc001011b

#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)

/*
 * 0xff is broadcast, so the max index allowed for physical APIC ID
 * table is 0xfe. APIC IDs above 0xff are reserved.
 */
#define AVIC_MAX_PHYSICAL_ID_COUNT 255

#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF

/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS 8
#define AVIC_VCPU_ID_MASK ((1 << AVIC_VCPU_ID_BITS) - 1)

@@ -73,12 +59,6 @@ struct amd_svm_iommu_ir {
    void *data; /* Storing pointer to struct amd_ir_data */
};

enum avic_ipi_failure_cause {
    AVIC_IPI_FAILURE_INVALID_INT_TYPE,
    AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
    AVIC_IPI_FAILURE_INVALID_TARGET,
    AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
};

/* Note:
 * This function is called from IOMMU driver to notify

@@ -289,6 +269,22 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
    return 0;
}

void avic_ring_doorbell(struct kvm_vcpu *vcpu)
{
    /*
     * Note, the vCPU could get migrated to a different pCPU at any point,
     * which could result in signalling the wrong/previous pCPU. But if
     * that happens the vCPU is guaranteed to do a VMRUN (after being
     * migrated) and thus will process pending interrupts, i.e. a doorbell
     * is not needed (and the spurious one is harmless).
     */
    int cpu = READ_ONCE(vcpu->cpu);

    if (cpu != get_cpu())
        wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
    put_cpu();
}

static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
                                   u32 icrl, u32 icrh)
{

@@ -304,8 +300,13 @@ static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
    kvm_for_each_vcpu(i, vcpu, kvm) {
        if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
                                GET_APIC_DEST_FIELD(icrh),
                                icrl & APIC_DEST_MASK))
            kvm_vcpu_wake_up(vcpu);
                                icrl & APIC_DEST_MASK)) {
            vcpu->arch.apic->irr_pending = true;
            svm_complete_interrupt_delivery(vcpu,
                                            icrl & APIC_MODE_MASK,
                                            icrl & APIC_INT_LEVELTRIG,
                                            icrl & APIC_VECTOR_MASK);
        }
    }
}


@@ -345,8 +346,6 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
        avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
        break;
    case AVIC_IPI_FAILURE_INVALID_TARGET:
        WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
                  index, vcpu->vcpu_id, icrh, icrl);
        break;
    case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
        WARN_ONCE(1, "Invalid backing page\n");

@@ -669,52 +668,6 @@ void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
    return;
}

int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
{
    if (!vcpu->arch.apicv_active)
        return -1;

    kvm_lapic_set_irr(vec, vcpu->arch.apic);

    /*
     * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
     * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
     * the read of guest_mode, which guarantees that either VMRUN will see
     * and process the new vIRR entry, or that the below code will signal
     * the doorbell if the vCPU is already running in the guest.
     */
    smp_mb__after_atomic();

    /*
     * Signal the doorbell to tell hardware to inject the IRQ if the vCPU
     * is in the guest. If the vCPU is not in the guest, hardware will
     * automatically process AVIC interrupts at VMRUN.
     */
    if (vcpu->mode == IN_GUEST_MODE) {
        int cpu = READ_ONCE(vcpu->cpu);

        /*
         * Note, the vCPU could get migrated to a different pCPU at any
         * point, which could result in signalling the wrong/previous
         * pCPU. But if that happens the vCPU is guaranteed to do a
         * VMRUN (after being migrated) and thus will process pending
         * interrupts, i.e. a doorbell is not needed (and the spurious
         * one is harmless).
         */
        if (cpu != get_cpu())
            wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
        put_cpu();
    } else {
        /*
         * Wake the vCPU if it was blocking. KVM will then detect the
         * pending IRQ when checking if the vCPU has a wake event.
         */
        kvm_vcpu_wake_up(vcpu);
    }

    return 0;
}

bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
    return false;
@@ -1457,18 +1457,6 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
        !__nested_vmcb_check_save(vcpu, &save_cached))
        goto out_free;

    /*
     * While the nested guest CR3 is already checked and set by
     * KVM_SET_SREGS, it was set when nested state was yet loaded,
     * thus MMU might not be initialized correctly.
     * Set it again to fix this.
     */

    ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
                              nested_npt_enabled(svm), false);
    if (WARN_ON_ONCE(ret))
        goto out_free;


    /*
     * All checks done, we can enter guest mode. Userspace provides

@@ -1494,6 +1482,20 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,

    svm_switch_vmcb(svm, &svm->nested.vmcb02);
    nested_vmcb02_prepare_control(svm);

    /*
     * While the nested guest CR3 is already checked and set by
     * KVM_SET_SREGS, it was set when nested state was yet loaded,
     * thus MMU might not be initialized correctly.
     * Set it again to fix this.
     */

    ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
                              nested_npt_enabled(svm), false);
    if (WARN_ON_ONCE(ret))
        goto out_free;


    kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
    ret = 0;
out_free:
@@ -1585,6 +1585,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
    struct vcpu_svm *svm = to_svm(vcpu);
    u64 hcr0 = cr0;
    bool old_paging = is_paging(vcpu);

#ifdef CONFIG_X86_64
    if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {

@@ -1601,8 +1602,11 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
#endif
    vcpu->arch.cr0 = cr0;

    if (!npt_enabled)
    if (!npt_enabled) {
        hcr0 |= X86_CR0_PG | X86_CR0_WP;
        if (old_paging != is_paging(vcpu))
            svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
    }

    /*
     * re-enable caching here because the QEMU bios

@@ -1646,8 +1650,12 @@ void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        svm_flush_tlb(vcpu);

    vcpu->arch.cr4 = cr4;
    if (!npt_enabled)
    if (!npt_enabled) {
        cr4 |= X86_CR4_PAE;

        if (!is_paging(vcpu))
            cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
    }
    cr4 |= host_cr4_mce;
    to_svm(vcpu)->vmcb->save.cr4 = cr4;
    vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);

@@ -3291,19 +3299,53 @@ static void svm_set_irq(struct kvm_vcpu *vcpu)
        SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
                                  int trig_mode, int vector)
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
                                     int trig_mode, int vector)
{
    struct kvm_vcpu *vcpu = apic->vcpu;
    /*
     * vcpu->arch.apicv_active must be read after vcpu->mode.
     * Pairs with smp_store_release in vcpu_enter_guest.
     */
    bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);

    if (svm_deliver_avic_intr(vcpu, vector)) {
        kvm_lapic_set_irr(vector, apic);
    if (!READ_ONCE(vcpu->arch.apicv_active)) {
        /* Process the interrupt via inject_pending_event */
        kvm_make_request(KVM_REQ_EVENT, vcpu);
        kvm_vcpu_kick(vcpu);
    } else {
        trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
                                   trig_mode, vector);
        return;
    }

    trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
    if (in_guest_mode) {
        /*
         * Signal the doorbell to tell hardware to inject the IRQ. If
         * the vCPU exits the guest before the doorbell chimes, hardware
         * will automatically process AVIC interrupts at the next VMRUN.
         */
        avic_ring_doorbell(vcpu);
    } else {
        /*
         * Wake the vCPU if it was blocking. KVM will then detect the
         * pending IRQ when checking if the vCPU has a wake event.
         */
        kvm_vcpu_wake_up(vcpu);
    }
}

static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
                                  int trig_mode, int vector)
{
    kvm_lapic_set_irr(vector, apic);

    /*
     * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
     * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
     * the read of guest_mode. This guarantees that either VMRUN will see
     * and process the new vIRR entry, or that svm_complete_interrupt_delivery
     * will signal the doorbell if the CPU has already entered the guest.
     */
    smp_mb__after_atomic();
    svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
}

static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)

@@ -3353,11 +3395,13 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
    if (svm->nested.nested_run_pending)
        return -EBUSY;

    if (svm_nmi_blocked(vcpu))
        return 0;

    /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */
    if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
        return -EBUSY;

    return !svm_nmi_blocked(vcpu);
    return 1;
}

static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)

@@ -3409,9 +3453,13 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    if (svm->nested.nested_run_pending)
        return -EBUSY;

    if (svm_interrupt_blocked(vcpu))
        return 0;

    /*
     * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
     * e.g. if the IRQ arrived asynchronously after checking nested events.

@@ -3419,7 +3467,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
    if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
        return -EBUSY;

    return !svm_interrupt_blocked(vcpu);
    return 1;
}

static void svm_enable_irq_window(struct kvm_vcpu *vcpu)

@@ -4150,11 +4198,14 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
    if (svm->nested.nested_run_pending)
        return -EBUSY;

    if (svm_smi_blocked(vcpu))
        return 0;

    /* An SMI must not be injected into L2 if it's supposed to VM-Exit. */
    if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
        return -EBUSY;

    return !svm_smi_blocked(vcpu);
    return 1;
}

static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)

@@ -4248,11 +4299,18 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
     * Enter the nested guest now
     */

    vmcb_mark_all_dirty(svm->vmcb01.ptr);

    vmcb12 = map.hva;
    nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
    nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
    ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);

    if (ret)
        goto unmap_save;

    svm->nested.nested_run_pending = 1;

unmap_save:
    kvm_vcpu_unmap(vcpu, &map_save, true);
unmap_map:

@@ -4637,6 +4695,7 @@ static __init void svm_set_cpu_caps(void)
    /* CPUID 0x80000001 and 0x8000000A (SVM features) */
    if (nested) {
        kvm_cpu_cap_set(X86_FEATURE_SVM);
        kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);

        if (nrips)
            kvm_cpu_cap_set(X86_FEATURE_NRIPS);
@@ -489,6 +489,8 @@ void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
                          int read, int write);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
                                     int trig_mode, int vec);

/* nested.c */

@@ -556,17 +558,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);

@@ -583,12 +574,12 @@ bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
                       uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);

/* sev.c */
@@ -7659,6 +7659,7 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
        if (ret)
            return ret;

        vmx->nested.nested_run_pending = 1;
        vmx->nested.smm.guest_mode = false;
    }
    return 0;
@@ -9983,7 +9983,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
     * result in virtual interrupt delivery.
     */
    local_irq_disable();
    vcpu->mode = IN_GUEST_MODE;

    /* Store vcpu->apicv_active before vcpu->mode. */
    smp_store_release(&vcpu->mode, IN_GUEST_MODE);

    srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -133,32 +133,57 @@ static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
{
    struct kvm_vcpu_xen *vx = &v->arch.xen;
    struct gfn_to_hva_cache *ghc = &vx->runstate_cache;
    struct kvm_memslots *slots = kvm_memslots(v->kvm);
    bool atomic = (state == RUNSTATE_runnable);
    uint64_t state_entry_time;
    unsigned int offset;
    int __user *user_state;
    uint64_t __user *user_times;

    kvm_xen_update_runstate(v, state);

    if (!vx->runstate_set)
        return;

    if (unlikely(slots->generation != ghc->generation || kvm_is_error_hva(ghc->hva)) &&
        kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len))
        return;

    /* We made sure it fits in a single page */
    BUG_ON(!ghc->memslot);

    if (atomic)
        pagefault_disable();

    /*
     * The only difference between 32-bit and 64-bit versions of the
     * runstate struct us the alignment of uint64_t in 32-bit, which
     * means that the 64-bit version has an additional 4 bytes of
     * padding after the first field 'state'.
     *
     * So we use 'int __user *user_state' to point to the state field,
     * and 'uint64_t __user *user_times' for runstate_entry_time. So
     * the actual array of time[] in each state starts at user_times[1].
     */
    BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
    BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
    user_state = (int __user *)ghc->hva;

    BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);

    offset = offsetof(struct compat_vcpu_runstate_info, state_entry_time);
    user_times = (uint64_t __user *)(ghc->hva +
                                     offsetof(struct compat_vcpu_runstate_info,
                                              state_entry_time));
#ifdef CONFIG_X86_64
    /*
     * The only difference is alignment of uint64_t in 32-bit.
     * So the first field 'state' is accessed directly using
     * offsetof() (where its offset happens to be zero), while the
     * remaining fields which are all uint64_t, start at 'offset'
     * which we tweak here by adding 4.
     */
    BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
                 offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
    BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
                 offsetof(struct compat_vcpu_runstate_info, time) + 4);

    if (v->kvm->arch.xen.long_mode)
        offset = offsetof(struct vcpu_runstate_info, state_entry_time);
        user_times = (uint64_t __user *)(ghc->hva +
                                         offsetof(struct vcpu_runstate_info,
                                                  state_entry_time));
#endif
    /*
     * First write the updated state_entry_time at the appropriate

@@ -172,10 +197,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
    BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
                 sizeof(state_entry_time));

    if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
                                      &state_entry_time, offset,
                                      sizeof(state_entry_time)))
        return;
    if (__put_user(state_entry_time, user_times))
        goto out;
    smp_wmb();

    /*

@@ -189,11 +212,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
    BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
                 sizeof(vx->current_runstate));

    if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
                                      &vx->current_runstate,
                                      offsetof(struct vcpu_runstate_info, state),
                                      sizeof(vx->current_runstate)))
        return;
    if (__put_user(vx->current_runstate, user_state))
        goto out;

    /*
     * Write the actual runstate times immediately after the

@@ -208,24 +228,23 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
    BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
                 sizeof(vx->runstate_times));

    if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
                                      &vx->runstate_times[0],
                                      offset + sizeof(u64),
                                      sizeof(vx->runstate_times)))
        return;

    if (__copy_to_user(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times)))
        goto out;
    smp_wmb();

    /*
     * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
     * runstate_entry_time field.
     */

    state_entry_time &= ~XEN_RUNSTATE_UPDATE;
    if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
                                      &state_entry_time, offset,
                                      sizeof(state_entry_time)))
        return;
    __put_user(state_entry_time, user_times);
    smp_wmb();

out:
    mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);

    if (atomic)
        pagefault_enable();
}

int __kvm_xen_has_interrupt(struct kvm_vcpu *v)

@@ -443,6 +462,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
            break;
        }

        /* It must fit within a single page */
        if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_info) > PAGE_SIZE) {
            r = -EINVAL;
            break;
        }

        r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
                                      &vcpu->arch.xen.vcpu_info_cache,
                                      data->u.gpa,

@@ -460,6 +485,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
            break;
        }

        /* It must fit within a single page */
        if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct pvclock_vcpu_time_info) > PAGE_SIZE) {
            r = -EINVAL;
            break;
        }

        r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
                                      &vcpu->arch.xen.vcpu_time_info_cache,
                                      data->u.gpa,

@@ -481,6 +512,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
            break;
        }

        /* It must fit within a single page */
        if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_runstate_info) > PAGE_SIZE) {
            r = -EINVAL;
            break;
        }

        r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
                                      &vcpu->arch.xen.runstate_cache,
                                      data->u.gpa,
@@ -82,7 +82,6 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pi_mmio_test
TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
TEST_GEN_PROGS_x86_64 += x86_64/amx_test
TEST_GEN_PROGS_x86_64 += access_tracking_perf_test