x86:
* Miscellaneous bugfixes * A small cleanup for the new workqueue code * Documentation syntax fix RISC-V: * Remove hgatp zeroing in kvm_arch_vcpu_put() * Fix alignment of the guest_hang() in KVM selftest * Fix PTE A and D bits in KVM selftest * Missing #include in vcpu_fp.c ARM: * Some PSCI fixes after introducing PSCIv1.1 and SYSTEM_RESET2 * Fix the MMU write-lock not being taken on THP split * Fix mixed-width VM handling * Fix potential UAF when debugfs registration fails * Various selftest updates for all of the above -----BEGIN PGP SIGNATURE----- iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmJVtdMUHHBib256aW5p QHJlZGhhdC5jb20ACgkQv/vSX3jHroO33QgAiPh80xUkYfnl8FVN440S5F7UOPQ2 Cs/PbroNoP+Oz2GoG07aaqnUkFFApeBE5S+VMu1zhRNAernqpreN64/Y2iNaz0Y6 +MbvEX0FhQRW0UZJIF2m49ilgO8Gkt6aEpVRulq5G9w4NWiH1PtR25FVXfDMi8OG xdw4x1jwXNI9lOQJ5EpUKVde3rAbxCfoC6hCTh5pCNd9oLuVeLfnC+Uv91fzXltl EIeBlV0/mAi3RLp2E/AX38WP6ucMZqOOAy91/RTqX6oIx/7QL28ZNHXVrwQ67Hkd pAr3MAk84tZL58lnosw53i5aXAf9CBp0KBnpk2KGutfRNJ4Vzs1e+DZAJA== =vqAv -----END PGP SIGNATURE----- Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm Pull kvm fixes from Paolo Bonzini: "x86: - Miscellaneous bugfixes - A small cleanup for the new workqueue code - Documentation syntax fix RISC-V: - Remove hgatp zeroing in kvm_arch_vcpu_put() - Fix alignment of the guest_hang() in KVM selftest - Fix PTE A and D bits in KVM selftest - Missing #include in vcpu_fp.c ARM: - Some PSCI fixes after introducing PSCIv1.1 and SYSTEM_RESET2 - Fix the MMU write-lock not being taken on THP split - Fix mixed-width VM handling - Fix potential UAF when debugfs registration fails - Various selftest updates for all of the above" * tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (24 commits) KVM: x86: hyper-v: Avoid writing to TSC page without an active vCPU KVM: SVM: Do not activate AVIC for SEV-enabled guest Documentation: KVM: Add SPDX-License-Identifier tag selftests: kvm: add tsc_scaling_sync to .gitignore RISC-V: KVM: include 
missing hwcap.h into vcpu_fp KVM: selftests: riscv: Fix alignment of the guest_hang() function KVM: selftests: riscv: Set PTE A and D bits in VS-stage page table RISC-V: KVM: Don't clear hgatp CSR in kvm_arch_vcpu_put() selftests: KVM: Free the GIC FD when cleaning up in arch_timer selftests: KVM: Don't leak GIC FD across dirty log test iterations KVM: Don't create VM debugfs files outside of the VM directory KVM: selftests: get-reg-list: Add KVM_REG_ARM_FW_REG(3) KVM: avoid NULL pointer dereference in kvm_dirty_ring_push KVM: arm64: selftests: Introduce vcpu_width_config KVM: arm64: mixed-width check should be skipped for uninitialized vCPUs KVM: arm64: vgic: Remove unnecessary type castings KVM: arm64: Don't split hugepages outside of MMU write lock KVM: arm64: Drop unneeded minor version check from PSCI v1.x handler KVM: arm64: Actually prevent SMC64 SYSTEM_RESET2 from AArch32 KVM: arm64: Generally disallow SMC64 for AArch32 guests ...
This commit is contained in:
commit
453096eb04
@ -6190,6 +6190,7 @@ Valid values for 'type' are:
|
||||
unsigned long args[6];
|
||||
unsigned long ret[2];
|
||||
} riscv_sbi;
|
||||
|
||||
If exit reason is KVM_EXIT_RISCV_SBI then it indicates that the VCPU has
|
||||
done a SBI call which is not handled by KVM RISC-V kernel module. The details
|
||||
of the SBI call are available in 'riscv_sbi' member of kvm_run structure. The
|
||||
|
@ -1,3 +1,5 @@
|
||||
.. SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
=================
|
||||
KVM VCPU Requests
|
||||
=================
|
||||
|
@ -1,3 +1,5 @@
|
||||
.. SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
======================================
|
||||
Secure Encrypted Virtualization (SEV)
|
||||
======================================
|
||||
|
@ -1,3 +1,4 @@
|
||||
.. SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
=======================================
|
||||
Known limitations of CPU virtualization
|
||||
@ -36,4 +37,3 @@ Nested virtualization features
|
||||
------------------------------
|
||||
|
||||
TBD
|
||||
|
||||
|
@ -1,3 +1,5 @@
|
||||
.. SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
==============================
|
||||
Running nested guests with KVM
|
||||
==============================
|
||||
|
@ -43,10 +43,22 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
|
||||
|
||||
void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
|
||||
|
||||
#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
|
||||
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return !(vcpu->arch.hcr_el2 & HCR_RW);
|
||||
}
|
||||
#else
|
||||
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
|
||||
WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED,
|
||||
&kvm->arch.flags));
|
||||
|
||||
return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
@ -72,15 +84,14 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
|
||||
vcpu->arch.hcr_el2 |= HCR_TVM;
|
||||
}
|
||||
|
||||
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
|
||||
if (vcpu_el1_is_32bit(vcpu))
|
||||
vcpu->arch.hcr_el2 &= ~HCR_RW;
|
||||
|
||||
else
|
||||
/*
|
||||
* TID3: trap feature register accesses that we virtualise.
|
||||
* For now this is conditional, since no AArch32 feature regs
|
||||
* are currently virtualised.
|
||||
*/
|
||||
if (!vcpu_el1_is_32bit(vcpu))
|
||||
vcpu->arch.hcr_el2 |= HCR_TID3;
|
||||
|
||||
if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
|
||||
|
@ -127,6 +127,16 @@ struct kvm_arch {
|
||||
#define KVM_ARCH_FLAG_MTE_ENABLED 1
|
||||
/* At least one vCPU has ran in the VM */
|
||||
#define KVM_ARCH_FLAG_HAS_RAN_ONCE 2
|
||||
/*
|
||||
* The following two bits are used to indicate the guest's EL1
|
||||
* register width configuration. A value of KVM_ARCH_FLAG_EL1_32BIT
|
||||
* bit is valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set.
|
||||
* Otherwise, the guest's EL1 register width has not yet been
|
||||
* determined yet.
|
||||
*/
|
||||
#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED 3
|
||||
#define KVM_ARCH_FLAG_EL1_32BIT 4
|
||||
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
|
@ -1079,7 +1079,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
gfn_t gfn;
|
||||
kvm_pfn_t pfn;
|
||||
bool logging_active = memslot_is_logging(memslot);
|
||||
bool logging_perm_fault = false;
|
||||
bool use_read_lock = false;
|
||||
unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
|
||||
unsigned long vma_pagesize, fault_granule;
|
||||
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
|
||||
@ -1114,7 +1114,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
if (logging_active) {
|
||||
force_pte = true;
|
||||
vma_shift = PAGE_SHIFT;
|
||||
logging_perm_fault = (fault_status == FSC_PERM && write_fault);
|
||||
use_read_lock = (fault_status == FSC_PERM && write_fault &&
|
||||
fault_granule == PAGE_SIZE);
|
||||
} else {
|
||||
vma_shift = get_vma_page_shift(vma, hva);
|
||||
}
|
||||
@ -1218,7 +1219,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
* logging dirty logging, only acquire read lock for permission
|
||||
* relaxation.
|
||||
*/
|
||||
if (logging_perm_fault)
|
||||
if (use_read_lock)
|
||||
read_lock(&kvm->mmu_lock);
|
||||
else
|
||||
write_lock(&kvm->mmu_lock);
|
||||
@ -1268,6 +1269,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
|
||||
ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
|
||||
} else {
|
||||
WARN_ONCE(use_read_lock, "Attempted stage-2 map outside of write lock\n");
|
||||
|
||||
ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
|
||||
__pfn_to_phys(pfn), prot,
|
||||
memcache);
|
||||
@ -1280,7 +1283,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
if (logging_perm_fault)
|
||||
if (use_read_lock)
|
||||
read_unlock(&kvm->mmu_lock);
|
||||
else
|
||||
write_unlock(&kvm->mmu_lock);
|
||||
|
@ -215,15 +215,11 @@ static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu)
|
||||
|
||||
static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 fn)
|
||||
{
|
||||
switch(fn) {
|
||||
case PSCI_0_2_FN64_CPU_SUSPEND:
|
||||
case PSCI_0_2_FN64_CPU_ON:
|
||||
case PSCI_0_2_FN64_AFFINITY_INFO:
|
||||
/* Disallow these functions for 32bit guests */
|
||||
if (vcpu_mode_is_32bit(vcpu))
|
||||
/*
|
||||
* Prevent 32 bit guests from calling 64 bit PSCI functions.
|
||||
*/
|
||||
if ((fn & PSCI_0_2_64BIT) && vcpu_mode_is_32bit(vcpu))
|
||||
return PSCI_RET_NOT_SUPPORTED;
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -235,10 +231,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
|
||||
unsigned long val;
|
||||
int ret = 1;
|
||||
|
||||
val = kvm_psci_check_allowed_function(vcpu, psci_fn);
|
||||
if (val)
|
||||
goto out;
|
||||
|
||||
switch (psci_fn) {
|
||||
case PSCI_0_2_FN_PSCI_VERSION:
|
||||
/*
|
||||
@ -306,7 +298,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
|
||||
break;
|
||||
}
|
||||
|
||||
out:
|
||||
smccc_set_retval(vcpu, val, 0, 0, 0);
|
||||
return ret;
|
||||
}
|
||||
@ -318,9 +309,6 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
|
||||
unsigned long val;
|
||||
int ret = 1;
|
||||
|
||||
if (minor > 1)
|
||||
return -EINVAL;
|
||||
|
||||
switch(psci_fn) {
|
||||
case PSCI_0_2_FN_PSCI_VERSION:
|
||||
val = minor == 0 ? KVM_ARM_PSCI_1_0 : KVM_ARM_PSCI_1_1;
|
||||
@ -426,6 +414,15 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
|
||||
*/
|
||||
int kvm_psci_call(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 psci_fn = smccc_get_function(vcpu);
|
||||
unsigned long val;
|
||||
|
||||
val = kvm_psci_check_allowed_function(vcpu, psci_fn);
|
||||
if (val) {
|
||||
smccc_set_retval(vcpu, val, 0, 0, 0);
|
||||
return 1;
|
||||
}
|
||||
|
||||
switch (kvm_psci_version(vcpu)) {
|
||||
case KVM_ARM_PSCI_1_1:
|
||||
return kvm_psci_1_x_call(vcpu, 1);
|
||||
|
@ -181,27 +181,51 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
|
||||
/**
|
||||
* kvm_set_vm_width() - set the register width for the guest
|
||||
* @vcpu: Pointer to the vcpu being configured
|
||||
*
|
||||
* Set both KVM_ARCH_FLAG_EL1_32BIT and KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED
|
||||
* in the VM flags based on the vcpu's requested register width, the HW
|
||||
* capabilities and other options (such as MTE).
|
||||
* When REG_WIDTH_CONFIGURED is already set, the vcpu settings must be
|
||||
* consistent with the value of the FLAG_EL1_32BIT bit in the flags.
|
||||
*
|
||||
* Return: 0 on success, negative error code on failure.
|
||||
*/
|
||||
static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_vcpu *tmp;
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
bool is32bit;
|
||||
unsigned long i;
|
||||
|
||||
is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
|
||||
if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
|
||||
return false;
|
||||
|
||||
/* MTE is incompatible with AArch32 */
|
||||
if (kvm_has_mte(vcpu->kvm) && is32bit)
|
||||
return false;
|
||||
lockdep_assert_held(&kvm->lock);
|
||||
|
||||
/* Check that the vcpus are either all 32bit or all 64bit */
|
||||
kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
|
||||
if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
|
||||
return false;
|
||||
if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
|
||||
/*
|
||||
* The guest's register width is already configured.
|
||||
* Make sure that the vcpu is consistent with it.
|
||||
*/
|
||||
if (is32bit == test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags))
|
||||
return 0;
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return true;
|
||||
if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
|
||||
return -EINVAL;
|
||||
|
||||
/* MTE is incompatible with AArch32 */
|
||||
if (kvm_has_mte(kvm) && is32bit)
|
||||
return -EINVAL;
|
||||
|
||||
if (is32bit)
|
||||
set_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
|
||||
|
||||
set_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -230,10 +254,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
||||
u32 pstate;
|
||||
|
||||
mutex_lock(&vcpu->kvm->lock);
|
||||
ret = kvm_set_vm_width(vcpu);
|
||||
if (!ret) {
|
||||
reset_state = vcpu->arch.reset_state;
|
||||
WRITE_ONCE(vcpu->arch.reset_state.reset, false);
|
||||
}
|
||||
mutex_unlock(&vcpu->kvm->lock);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Reset PMU outside of the non-preemptible section */
|
||||
kvm_pmu_vcpu_reset(vcpu);
|
||||
|
||||
@ -260,14 +290,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
}
|
||||
|
||||
if (!vcpu_allowed_register_width(vcpu)) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
switch (vcpu->arch.target) {
|
||||
default:
|
||||
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
|
||||
if (vcpu_el1_is_32bit(vcpu)) {
|
||||
pstate = VCPU_RESET_PSTATE_SVC;
|
||||
} else {
|
||||
pstate = VCPU_RESET_PSTATE_EL1;
|
||||
|
@ -82,7 +82,7 @@ static bool end_of_vgic(struct vgic_state_iter *iter)
|
||||
|
||||
static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
|
||||
{
|
||||
struct kvm *kvm = (struct kvm *)s->private;
|
||||
struct kvm *kvm = s->private;
|
||||
struct vgic_state_iter *iter;
|
||||
|
||||
mutex_lock(&kvm->lock);
|
||||
@ -110,7 +110,7 @@ out:
|
||||
|
||||
static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
|
||||
{
|
||||
struct kvm *kvm = (struct kvm *)s->private;
|
||||
struct kvm *kvm = s->private;
|
||||
struct vgic_state_iter *iter = kvm->arch.vgic.iter;
|
||||
|
||||
++*pos;
|
||||
@ -122,7 +122,7 @@ static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
|
||||
|
||||
static void vgic_debug_stop(struct seq_file *s, void *v)
|
||||
{
|
||||
struct kvm *kvm = (struct kvm *)s->private;
|
||||
struct kvm *kvm = s->private;
|
||||
struct vgic_state_iter *iter;
|
||||
|
||||
/*
|
||||
@ -229,8 +229,8 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
|
||||
|
||||
static int vgic_debug_show(struct seq_file *s, void *v)
|
||||
{
|
||||
struct kvm *kvm = (struct kvm *)s->private;
|
||||
struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
|
||||
struct kvm *kvm = s->private;
|
||||
struct vgic_state_iter *iter = v;
|
||||
struct vgic_irq *irq;
|
||||
struct kvm_vcpu *vcpu = NULL;
|
||||
unsigned long flags;
|
||||
|
@ -2143,7 +2143,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
|
||||
static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
|
||||
void *ptr, void *opaque)
|
||||
{
|
||||
struct its_device *dev = (struct its_device *)opaque;
|
||||
struct its_device *dev = opaque;
|
||||
struct its_collection *collection;
|
||||
struct kvm *kvm = its->dev->kvm;
|
||||
struct kvm_vcpu *vcpu = NULL;
|
||||
|
@ -653,8 +653,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
|
||||
vcpu->arch.isa);
|
||||
kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
|
||||
|
||||
csr_write(CSR_HGATP, 0);
|
||||
|
||||
csr->vsstatus = csr_read(CSR_VSSTATUS);
|
||||
csr->vsie = csr_read(CSR_VSIE);
|
||||
csr->vstvec = csr_read(CSR_VSTVEC);
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include <linux/err.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/hwcap.h>
|
||||
|
||||
#ifdef CONFIG_FPU
|
||||
void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
|
||||
|
@ -974,12 +974,10 @@ enum hv_tsc_page_status {
|
||||
HV_TSC_PAGE_UNSET = 0,
|
||||
/* TSC page MSR was written by the guest, update pending */
|
||||
HV_TSC_PAGE_GUEST_CHANGED,
|
||||
/* TSC page MSR was written by KVM userspace, update pending */
|
||||
/* TSC page update was triggered from the host side */
|
||||
HV_TSC_PAGE_HOST_CHANGED,
|
||||
/* TSC page was properly set up and is currently active */
|
||||
HV_TSC_PAGE_SET,
|
||||
/* TSC page is currently being updated and therefore is inactive */
|
||||
HV_TSC_PAGE_UPDATING,
|
||||
/* TSC page was set up with an inaccessible GPA */
|
||||
HV_TSC_PAGE_BROKEN,
|
||||
};
|
||||
@ -1052,6 +1050,7 @@ enum kvm_apicv_inhibit {
|
||||
APICV_INHIBIT_REASON_X2APIC,
|
||||
APICV_INHIBIT_REASON_BLOCKIRQ,
|
||||
APICV_INHIBIT_REASON_ABSENT,
|
||||
APICV_INHIBIT_REASON_SEV,
|
||||
};
|
||||
|
||||
struct kvm_arch {
|
||||
@ -1585,8 +1584,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
|
||||
#define kvm_arch_pmi_in_guest(vcpu) \
|
||||
((vcpu) && (vcpu)->arch.handling_intr_from_guest)
|
||||
|
||||
int kvm_mmu_module_init(void);
|
||||
void kvm_mmu_module_exit(void);
|
||||
void kvm_mmu_x86_module_init(void);
|
||||
int kvm_mmu_vendor_module_init(void);
|
||||
void kvm_mmu_vendor_module_exit(void);
|
||||
|
||||
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
|
||||
int kvm_mmu_create(struct kvm_vcpu *vcpu);
|
||||
|
@ -1135,11 +1135,13 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
|
||||
BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
|
||||
BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
|
||||
|
||||
if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
|
||||
hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
|
||||
return;
|
||||
|
||||
mutex_lock(&hv->hv_lock);
|
||||
|
||||
if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
|
||||
hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
|
||||
hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
|
||||
goto out_unlock;
|
||||
|
||||
if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
|
||||
goto out_unlock;
|
||||
|
||||
@ -1201,45 +1203,19 @@ out_unlock:
|
||||
mutex_unlock(&hv->hv_lock);
|
||||
}
|
||||
|
||||
void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
|
||||
void kvm_hv_request_tsc_page_update(struct kvm *kvm)
|
||||
{
|
||||
struct kvm_hv *hv = to_kvm_hv(kvm);
|
||||
u64 gfn;
|
||||
int idx;
|
||||
|
||||
if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
|
||||
hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
|
||||
tsc_page_update_unsafe(hv))
|
||||
return;
|
||||
|
||||
mutex_lock(&hv->hv_lock);
|
||||
|
||||
if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
|
||||
goto out_unlock;
|
||||
if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
|
||||
!tsc_page_update_unsafe(hv))
|
||||
hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
|
||||
|
||||
/* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
|
||||
if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
|
||||
hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
|
||||
|
||||
gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
|
||||
|
||||
hv->tsc_ref.tsc_sequence = 0;
|
||||
|
||||
/*
|
||||
* Take the srcu lock as memslots will be accessed to check the gfn
|
||||
* cache generation against the memslots generation.
|
||||
*/
|
||||
idx = srcu_read_lock(&kvm->srcu);
|
||||
if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
|
||||
&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
|
||||
hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
|
||||
srcu_read_unlock(&kvm->srcu, idx);
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&hv->hv_lock);
|
||||
}
|
||||
|
||||
|
||||
static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
|
||||
{
|
||||
if (!hv_vcpu->enforce_cpuid)
|
||||
|
@ -137,7 +137,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);
|
||||
|
||||
void kvm_hv_setup_tsc_page(struct kvm *kvm,
|
||||
struct pvclock_vcpu_time_info *hv_clock);
|
||||
void kvm_hv_invalidate_tsc_page(struct kvm *kvm);
|
||||
void kvm_hv_request_tsc_page_update(struct kvm *kvm);
|
||||
|
||||
void kvm_hv_init_vm(struct kvm *kvm);
|
||||
void kvm_hv_destroy_vm(struct kvm *kvm);
|
||||
|
@ -6237,12 +6237,24 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_mmu_module_init(void)
|
||||
/*
|
||||
* nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
|
||||
* its default value of -1 is technically undefined behavior for a boolean.
|
||||
*/
|
||||
void kvm_mmu_x86_module_init(void)
|
||||
{
|
||||
int ret = -ENOMEM;
|
||||
|
||||
if (nx_huge_pages == -1)
|
||||
__set_nx_huge_pages(get_nx_auto_mode());
|
||||
}
|
||||
|
||||
/*
|
||||
* The bulk of the MMU initialization is deferred until the vendor module is
|
||||
* loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
|
||||
* to be reset when a potentially different vendor module is loaded.
|
||||
*/
|
||||
int kvm_mmu_vendor_module_init(void)
|
||||
{
|
||||
int ret = -ENOMEM;
|
||||
|
||||
/*
|
||||
* MMU roles use union aliasing which is, generally speaking, an
|
||||
@ -6290,7 +6302,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
|
||||
mmu_free_memory_caches(vcpu);
|
||||
}
|
||||
|
||||
void kvm_mmu_module_exit(void)
|
||||
void kvm_mmu_vendor_module_exit(void)
|
||||
{
|
||||
mmu_destroy_caches();
|
||||
percpu_counter_destroy(&kvm_total_used_mmu_pages);
|
||||
|
@ -51,7 +51,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
|
||||
if (!kvm->arch.tdp_mmu_enabled)
|
||||
return;
|
||||
|
||||
flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
|
||||
/* Also waits for any queued work items. */
|
||||
destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
|
||||
|
||||
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
|
||||
|
@ -837,7 +837,8 @@ bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
|
||||
BIT(APICV_INHIBIT_REASON_IRQWIN) |
|
||||
BIT(APICV_INHIBIT_REASON_PIT_REINJ) |
|
||||
BIT(APICV_INHIBIT_REASON_X2APIC) |
|
||||
BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
|
||||
BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |
|
||||
BIT(APICV_INHIBIT_REASON_SEV);
|
||||
|
||||
return supported & BIT(reason);
|
||||
}
|
||||
|
@ -260,6 +260,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
|
||||
INIT_LIST_HEAD(&sev->regions_list);
|
||||
INIT_LIST_HEAD(&sev->mirror_vms);
|
||||
|
||||
kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_SEV);
|
||||
|
||||
return 0;
|
||||
|
||||
e_free:
|
||||
@ -465,6 +467,7 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)
|
||||
page_virtual = kmap_atomic(pages[i]);
|
||||
clflush_cache_range(page_virtual, PAGE_SIZE);
|
||||
kunmap_atomic(page_virtual);
|
||||
cond_resched();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2901,7 +2901,7 @@ static void kvm_end_pvclock_update(struct kvm *kvm)
|
||||
|
||||
static void kvm_update_masterclock(struct kvm *kvm)
|
||||
{
|
||||
kvm_hv_invalidate_tsc_page(kvm);
|
||||
kvm_hv_request_tsc_page_update(kvm);
|
||||
kvm_start_pvclock_update(kvm);
|
||||
pvclock_update_vm_gtod_copy(kvm);
|
||||
kvm_end_pvclock_update(kvm);
|
||||
@ -3113,7 +3113,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
|
||||
offsetof(struct compat_vcpu_info, time));
|
||||
if (vcpu->xen.vcpu_time_info_set)
|
||||
kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
|
||||
if (!v->vcpu_idx)
|
||||
kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
|
||||
return 0;
|
||||
}
|
||||
@ -6241,7 +6240,7 @@ static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
|
||||
if (data.flags & ~KVM_CLOCK_VALID_FLAGS)
|
||||
return -EINVAL;
|
||||
|
||||
kvm_hv_invalidate_tsc_page(kvm);
|
||||
kvm_hv_request_tsc_page_update(kvm);
|
||||
kvm_start_pvclock_update(kvm);
|
||||
pvclock_update_vm_gtod_copy(kvm);
|
||||
|
||||
@ -8926,7 +8925,7 @@ int kvm_arch_init(void *opaque)
|
||||
}
|
||||
kvm_nr_uret_msrs = 0;
|
||||
|
||||
r = kvm_mmu_module_init();
|
||||
r = kvm_mmu_vendor_module_init();
|
||||
if (r)
|
||||
goto out_free_percpu;
|
||||
|
||||
@ -8974,7 +8973,7 @@ void kvm_arch_exit(void)
|
||||
cancel_work_sync(&pvclock_gtod_work);
|
||||
#endif
|
||||
kvm_x86_ops.hardware_enable = NULL;
|
||||
kvm_mmu_module_exit();
|
||||
kvm_mmu_vendor_module_exit();
|
||||
free_percpu(user_return_msrs);
|
||||
kmem_cache_destroy(x86_emulator_cache);
|
||||
#ifdef CONFIG_KVM_XEN
|
||||
@ -12986,3 +12985,19 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
|
||||
|
||||
static int __init kvm_x86_init(void)
|
||||
{
|
||||
kvm_mmu_x86_module_init();
|
||||
return 0;
|
||||
}
|
||||
module_init(kvm_x86_init);
|
||||
|
||||
static void __exit kvm_x86_exit(void)
|
||||
{
|
||||
/*
|
||||
* If module_init() is implemented, module_exit() must also be
|
||||
* implemented to allow module unload.
|
||||
*/
|
||||
}
|
||||
module_exit(kvm_x86_exit);
|
||||
|
2
tools/testing/selftests/kvm/.gitignore
vendored
2
tools/testing/selftests/kvm/.gitignore
vendored
@ -3,6 +3,7 @@
|
||||
/aarch64/debug-exceptions
|
||||
/aarch64/get-reg-list
|
||||
/aarch64/psci_cpu_on_test
|
||||
/aarch64/vcpu_width_config
|
||||
/aarch64/vgic_init
|
||||
/aarch64/vgic_irq
|
||||
/s390x/memop
|
||||
@ -33,6 +34,7 @@
|
||||
/x86_64/state_test
|
||||
/x86_64/svm_vmcall_test
|
||||
/x86_64/svm_int_ctl_test
|
||||
/x86_64/tsc_scaling_sync
|
||||
/x86_64/sync_regs_test
|
||||
/x86_64/tsc_msrs_test
|
||||
/x86_64/userspace_io_test
|
||||
|
@ -106,6 +106,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
|
||||
TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
|
||||
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
|
||||
TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test
|
||||
TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
|
||||
TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
|
||||
TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
|
||||
TEST_GEN_PROGS_aarch64 += demand_paging_test
|
||||
|
@ -362,11 +362,12 @@ static void test_init_timer_irq(struct kvm_vm *vm)
|
||||
pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
|
||||
}
|
||||
|
||||
static int gic_fd;
|
||||
|
||||
static struct kvm_vm *test_vm_create(void)
|
||||
{
|
||||
struct kvm_vm *vm;
|
||||
unsigned int i;
|
||||
int ret;
|
||||
int nr_vcpus = test_args.nr_vcpus;
|
||||
|
||||
vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
|
||||
@ -383,8 +384,8 @@ static struct kvm_vm *test_vm_create(void)
|
||||
|
||||
ucall_init(vm, NULL);
|
||||
test_init_timer_irq(vm);
|
||||
ret = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
|
||||
if (ret < 0) {
|
||||
gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
|
||||
if (gic_fd < 0) {
|
||||
print_skip("Failed to create vgic-v3");
|
||||
exit(KSFT_SKIP);
|
||||
}
|
||||
@ -395,6 +396,12 @@ static struct kvm_vm *test_vm_create(void)
|
||||
return vm;
|
||||
}
|
||||
|
||||
static void test_vm_cleanup(struct kvm_vm *vm)
|
||||
{
|
||||
close(gic_fd);
|
||||
kvm_vm_free(vm);
|
||||
}
|
||||
|
||||
static void test_print_help(char *name)
|
||||
{
|
||||
pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n",
|
||||
@ -478,7 +485,7 @@ int main(int argc, char *argv[])
|
||||
|
||||
vm = test_vm_create();
|
||||
test_run(vm);
|
||||
kvm_vm_free(vm);
|
||||
test_vm_cleanup(vm);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -503,8 +503,13 @@ static void run_test(struct vcpu_config *c)
|
||||
++missing_regs;
|
||||
|
||||
if (new_regs || missing_regs) {
|
||||
n = 0;
|
||||
for_each_reg_filtered(i)
|
||||
++n;
|
||||
|
||||
printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
|
||||
printf("%s: Number registers: %5lld\n", config_name(c), reg_list->n);
|
||||
printf("%s: Number registers: %5lld (includes %lld filtered registers)\n",
|
||||
config_name(c), reg_list->n, reg_list->n - n);
|
||||
}
|
||||
|
||||
if (new_regs) {
|
||||
@ -683,9 +688,10 @@ static __u64 base_regs[] = {
|
||||
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
|
||||
KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
|
||||
KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
|
||||
KVM_REG_ARM_FW_REG(0),
|
||||
KVM_REG_ARM_FW_REG(1),
|
||||
KVM_REG_ARM_FW_REG(2),
|
||||
KVM_REG_ARM_FW_REG(0), /* KVM_REG_ARM_PSCI_VERSION */
|
||||
KVM_REG_ARM_FW_REG(1), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 */
|
||||
KVM_REG_ARM_FW_REG(2), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 */
|
||||
KVM_REG_ARM_FW_REG(3), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 */
|
||||
ARM64_SYS_REG(3, 3, 14, 3, 1), /* CNTV_CTL_EL0 */
|
||||
ARM64_SYS_REG(3, 3, 14, 3, 2), /* CNTV_CVAL_EL0 */
|
||||
ARM64_SYS_REG(3, 3, 14, 0, 2),
|
||||
|
122
tools/testing/selftests/kvm/aarch64/vcpu_width_config.c
Normal file
122
tools/testing/selftests/kvm/aarch64/vcpu_width_config.c
Normal file
@ -0,0 +1,122 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* vcpu_width_config - Test KVM_ARM_VCPU_INIT() with KVM_ARM_VCPU_EL1_32BIT.
|
||||
*
|
||||
* Copyright (c) 2022 Google LLC.
|
||||
*
|
||||
* This is a test that ensures that non-mixed-width vCPUs (all 64bit vCPUs
|
||||
* or all 32bit vcPUs) can be configured and mixed-width vCPUs cannot be
|
||||
* configured.
|
||||
*/
|
||||
|
||||
#include "kvm_util.h"
|
||||
#include "processor.h"
|
||||
#include "test_util.h"
|
||||
|
||||
|
||||
/*
|
||||
* Add a vCPU, run KVM_ARM_VCPU_INIT with @init1, and then
|
||||
* add another vCPU, and run KVM_ARM_VCPU_INIT with @init2.
|
||||
*/
|
||||
static int add_init_2vcpus(struct kvm_vcpu_init *init1,
|
||||
struct kvm_vcpu_init *init2)
|
||||
{
|
||||
struct kvm_vm *vm;
|
||||
int ret;
|
||||
|
||||
vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
|
||||
|
||||
vm_vcpu_add(vm, 0);
|
||||
ret = _vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
|
||||
if (ret)
|
||||
goto free_exit;
|
||||
|
||||
vm_vcpu_add(vm, 1);
|
||||
ret = _vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
|
||||
|
||||
free_exit:
|
||||
kvm_vm_free(vm);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Add two vCPUs, then run KVM_ARM_VCPU_INIT for one vCPU with @init1,
|
||||
* and run KVM_ARM_VCPU_INIT for another vCPU with @init2.
|
||||
*/
|
||||
static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init1,
|
||||
struct kvm_vcpu_init *init2)
|
||||
{
|
||||
struct kvm_vm *vm;
|
||||
int ret;
|
||||
|
||||
vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
|
||||
|
||||
vm_vcpu_add(vm, 0);
|
||||
vm_vcpu_add(vm, 1);
|
||||
|
||||
ret = _vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
|
||||
if (ret)
|
||||
goto free_exit;
|
||||
|
||||
ret = _vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
|
||||
|
||||
free_exit:
|
||||
kvm_vm_free(vm);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Tests that two 64bit vCPUs can be configured, two 32bit vCPUs can be
|
||||
* configured, and two mixed-width vCPUs cannot be configured.
|
||||
* Each of those three cases, configure vCPUs in two different orders.
|
||||
* The one is running KVM_CREATE_VCPU for 2 vCPUs, and then running
|
||||
* KVM_ARM_VCPU_INIT for them.
|
||||
* The other is running KVM_CREATE_VCPU and KVM_ARM_VCPU_INIT for a vCPU,
|
||||
* and then run those commands for another vCPU.
|
||||
*/
|
||||
int main(void)
|
||||
{
|
||||
struct kvm_vcpu_init init1, init2;
|
||||
struct kvm_vm *vm;
|
||||
int ret;
|
||||
|
||||
if (!kvm_check_cap(KVM_CAP_ARM_EL1_32BIT)) {
|
||||
print_skip("KVM_CAP_ARM_EL1_32BIT is not supported");
|
||||
exit(KSFT_SKIP);
|
||||
}
|
||||
|
||||
/* Get the preferred target type and copy that to init2 for later use */
|
||||
vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
|
||||
vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init1);
|
||||
kvm_vm_free(vm);
|
||||
init2 = init1;
|
||||
|
||||
/* Test with 64bit vCPUs */
|
||||
ret = add_init_2vcpus(&init1, &init1);
|
||||
TEST_ASSERT(ret == 0,
|
||||
"Configuring 64bit EL1 vCPUs failed unexpectedly");
|
||||
ret = add_2vcpus_init_2vcpus(&init1, &init1);
|
||||
TEST_ASSERT(ret == 0,
|
||||
"Configuring 64bit EL1 vCPUs failed unexpectedly");
|
||||
|
||||
/* Test with 32bit vCPUs */
|
||||
init1.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
|
||||
ret = add_init_2vcpus(&init1, &init1);
|
||||
TEST_ASSERT(ret == 0,
|
||||
"Configuring 32bit EL1 vCPUs failed unexpectedly");
|
||||
ret = add_2vcpus_init_2vcpus(&init1, &init1);
|
||||
TEST_ASSERT(ret == 0,
|
||||
"Configuring 32bit EL1 vCPUs failed unexpectedly");
|
||||
|
||||
/* Test with mixed-width vCPUs */
|
||||
init1.features[0] = 0;
|
||||
init2.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
|
||||
ret = add_init_2vcpus(&init1, &init2);
|
||||
TEST_ASSERT(ret != 0,
|
||||
"Configuring mixed-width vCPUs worked unexpectedly");
|
||||
ret = add_2vcpus_init_2vcpus(&init1, &init2);
|
||||
TEST_ASSERT(ret != 0,
|
||||
"Configuring mixed-width vCPUs worked unexpectedly");
|
||||
|
||||
return 0;
|
||||
}
|
@ -18,11 +18,40 @@
|
||||
#include "test_util.h"
|
||||
#include "perf_test_util.h"
|
||||
#include "guest_modes.h"
|
||||
|
||||
#ifdef __aarch64__
|
||||
#include "aarch64/vgic.h"
|
||||
|
||||
#define GICD_BASE_GPA 0x8000000ULL
|
||||
#define GICR_BASE_GPA 0x80A0000ULL
|
||||
|
||||
static int gic_fd;
|
||||
|
||||
static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
|
||||
{
|
||||
/*
|
||||
* The test can still run even if hardware does not support GICv3, as it
|
||||
* is only an optimization to reduce guest exits.
|
||||
*/
|
||||
gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
|
||||
}
|
||||
|
||||
static void arch_cleanup_vm(struct kvm_vm *vm)
|
||||
{
|
||||
if (gic_fd > 0)
|
||||
close(gic_fd);
|
||||
}
|
||||
|
||||
#else /* __aarch64__ */
|
||||
|
||||
/* Non-aarch64 targets need no architecture-specific VM setup. */
static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
{
}
|
||||
|
||||
/* Non-aarch64 targets allocate nothing in arch_setup_vm(), so no cleanup. */
static void arch_cleanup_vm(struct kvm_vm *vm)
{
}
|
||||
|
||||
#endif
|
||||
|
||||
/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
|
||||
@ -206,9 +235,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
|
||||
vm_enable_cap(vm, &cap);
|
||||
}
|
||||
|
||||
#ifdef __aarch64__
|
||||
vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
|
||||
#endif
|
||||
arch_setup_vm(vm, nr_vcpus);
|
||||
|
||||
/* Start the iterations */
|
||||
iteration = 0;
|
||||
@ -302,6 +329,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
|
||||
}
|
||||
|
||||
free_bitmaps(bitmaps, p->slots);
|
||||
arch_cleanup_vm(vm);
|
||||
perf_test_destroy_vm(vm);
|
||||
}
|
||||
|
||||
|
@ -101,7 +101,9 @@ static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id,
|
||||
#define PGTBL_PTE_WRITE_SHIFT 2
|
||||
#define PGTBL_PTE_READ_MASK 0x0000000000000002ULL
|
||||
#define PGTBL_PTE_READ_SHIFT 1
|
||||
#define PGTBL_PTE_PERM_MASK (PGTBL_PTE_EXECUTE_MASK | \
|
||||
#define PGTBL_PTE_PERM_MASK (PGTBL_PTE_ACCESSED_MASK | \
|
||||
PGTBL_PTE_DIRTY_MASK | \
|
||||
PGTBL_PTE_EXECUTE_MASK | \
|
||||
PGTBL_PTE_WRITE_MASK | \
|
||||
PGTBL_PTE_READ_MASK)
|
||||
#define PGTBL_PTE_VALID_MASK 0x0000000000000001ULL
|
||||
|
@ -268,7 +268,7 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
|
||||
core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6);
|
||||
}
|
||||
|
||||
static void guest_hang(void)
|
||||
static void __aligned(16) guest_hang(void)
|
||||
{
|
||||
while (1)
|
||||
;
|
||||
|
@ -434,8 +434,8 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
|
||||
|
||||
static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
kvm_dirty_ring_free(&vcpu->dirty_ring);
|
||||
kvm_arch_vcpu_destroy(vcpu);
|
||||
kvm_dirty_ring_free(&vcpu->dirty_ring);
|
||||
|
||||
/*
|
||||
* No need for rcu_read_lock as VCPU_RUN is the only place that changes
|
||||
@ -932,7 +932,7 @@ static void kvm_destroy_vm_debugfs(struct kvm *kvm)
|
||||
int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
|
||||
kvm_vcpu_stats_header.num_desc;
|
||||
|
||||
if (!kvm->debugfs_dentry)
|
||||
if (IS_ERR(kvm->debugfs_dentry))
|
||||
return;
|
||||
|
||||
debugfs_remove_recursive(kvm->debugfs_dentry);
|
||||
@ -955,6 +955,12 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
|
||||
int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
|
||||
kvm_vcpu_stats_header.num_desc;
|
||||
|
||||
/*
|
||||
* Force subsequent debugfs file creations to fail if the VM directory
|
||||
* is not created.
|
||||
*/
|
||||
kvm->debugfs_dentry = ERR_PTR(-ENOENT);
|
||||
|
||||
if (!debugfs_initialized())
|
||||
return 0;
|
||||
|
||||
@ -5479,7 +5485,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
|
||||
}
|
||||
add_uevent_var(env, "PID=%d", kvm->userspace_pid);
|
||||
|
||||
if (kvm->debugfs_dentry) {
|
||||
if (!IS_ERR(kvm->debugfs_dentry)) {
|
||||
char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
|
||||
|
||||
if (p) {
|
||||
|
Loading…
Reference in New Issue
Block a user