From 2e1ae9c07df5956ebab19144aa0da58ea37c9f69 Mon Sep 17 00:00:00 2001 From: Liu Yu-B13201 Date: Thu, 15 Mar 2012 10:52:13 +0000 Subject: [PATCH 01/93] KVM: PPC: Factor out guest epapr initialization epapr paravirtualization support is now a Kconfig selectable option Signed-off-by: Liu Yu [stuart.yoder@freescale.com: misc minor fixes, description update] Signed-off-by: Stuart Yoder Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/epapr_hcalls.h | 2 + arch/powerpc/kernel/Makefile | 1 + arch/powerpc/kernel/epapr_hcalls.S | 25 ++++++++++++ arch/powerpc/kernel/epapr_paravirt.c | 52 +++++++++++++++++++++++++ arch/powerpc/kernel/kvm.c | 28 ++----------- arch/powerpc/kernel/kvm_emul.S | 10 ----- arch/powerpc/platforms/Kconfig | 9 +++++ 7 files changed, 92 insertions(+), 35 deletions(-) create mode 100644 arch/powerpc/kernel/epapr_hcalls.S create mode 100644 arch/powerpc/kernel/epapr_paravirt.c diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h index 976835d8f22e..bf2c06c33871 100644 --- a/arch/powerpc/include/asm/epapr_hcalls.h +++ b/arch/powerpc/include/asm/epapr_hcalls.h @@ -153,6 +153,8 @@ #define EV_HCALL_CLOBBERS2 EV_HCALL_CLOBBERS3, "r5" #define EV_HCALL_CLOBBERS1 EV_HCALL_CLOBBERS2, "r4" +extern bool epapr_paravirt_enabled; +extern u32 epapr_hypercall_start[]; /* * We use "uintptr_t" to define a register because it's guaranteed to be a diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 83afacd3ba7b..bb282dd81612 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -128,6 +128,7 @@ ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),) obj-y += ppc_save_regs.o endif +obj-$(CONFIG_EPAPR_PARAVIRT) += epapr_paravirt.o epapr_hcalls.o obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o # Disable GCOV in odd or sensitive code diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S new file mode 100644 index 000000000000..697b390ebfd8 --- /dev/null +++ b/arch/powerpc/kernel/epapr_hcalls.S @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2012 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* Hypercall entry point. Will be patched with device tree instructions. */ +.global epapr_hypercall_start +epapr_hypercall_start: + li r3, -1 + nop + nop + nop + blr diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c new file mode 100644 index 000000000000..028aeae370b6 --- /dev/null +++ b/arch/powerpc/kernel/epapr_paravirt.c @@ -0,0 +1,52 @@ +/* + * ePAPR para-virtualization support. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + * Copyright (C) 2012 Freescale Semiconductor, Inc. + */ + +#include +#include +#include +#include + +bool epapr_paravirt_enabled; + +static int __init epapr_paravirt_init(void) +{ + struct device_node *hyper_node; + const u32 *insts; + int len, i; + + hyper_node = of_find_node_by_path("/hypervisor"); + if (!hyper_node) + return -ENODEV; + + insts = of_get_property(hyper_node, "hcall-instructions", &len); + if (!insts) + return -ENODEV; + + if (len % 4 || len > (4 * 4)) + return -ENODEV; + + for (i = 0; i < (len / 4); i++) + patch_instruction(epapr_hypercall_start + i, insts[i]); + + epapr_paravirt_enabled = true; + + return 0; +} + +early_initcall(epapr_paravirt_init); diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 62bdf2389669..1c13307e0308 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -31,6 +31,7 @@ #include #include #include +#include #define KVM_MAGIC_PAGE (-4096L) #define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x) @@ -726,7 +727,7 @@ unsigned long kvm_hypercall(unsigned long *in, unsigned long register r11 asm("r11") = nr; unsigned long register r12 asm("r12"); - asm volatile("bl kvm_hypercall_start" + asm volatile("bl epapr_hypercall_start" : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6), "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11), "=r"(r12) @@ -747,29 +748,6 @@ unsigned long kvm_hypercall(unsigned long *in, } EXPORT_SYMBOL_GPL(kvm_hypercall); -static int kvm_para_setup(void) -{ - extern u32 kvm_hypercall_start; - struct device_node *hyper_node; - u32 *insts; - int len, i; - - hyper_node = of_find_node_by_path("/hypervisor"); - if (!hyper_node) - return -1; - - insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len); - if (len % 4) - return -1; - if (len > (4 * 4)) - return -1; - - for (i = 0; i < (len / 4); i++) - kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]); - - return 0; -} - static __init void kvm_free_tmp(void) { unsigned long start, end; @@ -791,7 +769,7 @@ static int __init kvm_guest_init(void) if (!kvm_para_available()) goto free_tmp; - if (kvm_para_setup()) + if (!epapr_paravirt_enabled) goto free_tmp; if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE)) diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S index e291cf3cf954..62ceb2ac82cf 100644 --- a/arch/powerpc/kernel/kvm_emul.S +++ b/arch/powerpc/kernel/kvm_emul.S @@ -24,16 +24,6 @@ #include #include -/* Hypercall entry point. Will be patched with device tree instructions. */ - -.global kvm_hypercall_start -kvm_hypercall_start: - li r3, -1 - nop - nop - nop - blr - #define KVM_MAGIC_PAGE (-4096) #ifdef CONFIG_64BIT diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index a35ca44ade66..e7a896acd982 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -25,6 +25,7 @@ source "arch/powerpc/platforms/wsp/Kconfig" config KVM_GUEST bool "KVM Guest support" default n + select EPAPR_PARAVIRT ---help--- This option enables various optimizations for running under the KVM hypervisor. Overhead for the kernel when not running inside KVM should @@ -32,6 +33,14 @@ config KVM_GUEST In case of doubt, say Y +config EPAPR_PARAVIRT + bool "ePAPR para-virtualization support" + default n + help + Enables ePAPR para-virtualization support for guests. 
+ + In case of doubt, say Y + config PPC_NATIVE bool depends on 6xx || PPC64 From 32fad281c0680ed0ccade7dda85a2121cf9b1d06 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 4 May 2012 02:32:53 +0000 Subject: [PATCH 02/93] KVM: PPC: Book3S HV: Make the guest hash table size configurable This adds a new ioctl to enable userspace to control the size of the guest hashed page table (HPT) and to clear it out when resetting the guest. The KVM_PPC_ALLOCATE_HTAB ioctl is a VM ioctl and takes as its parameter a pointer to a u32 containing the desired order of the HPT (log base 2 of the size in bytes), which is updated on successful return to the actual order of the HPT which was allocated. There must be no vcpus running at the time of this ioctl. To enforce this, we now keep a count of the number of vcpus running in kvm->arch.vcpus_running. If the ioctl is called when a HPT has already been allocated, we don't reallocate the HPT but just clear it out. We first clear the kvm->arch.rma_setup_done flag, which has two effects: (a) since we hold the kvm->lock mutex, it will prevent any vcpus from starting to run until we're done, and (b) it means that the first vcpu to run after we're done will re-establish the VRMA if necessary. If userspace doesn't call this ioctl before running the first vcpu, the kernel will allocate a default-sized HPT at that point. We do it then rather than when creating the VM, as the code did previously, so that userspace has a chance to do the ioctl if it wants. When allocating the HPT, we can allocate either from the kernel page allocator, or from the preallocated pool. If userspace is asking for a different size from the preallocated HPTs, we first try to allocate using the kernel page allocator. Then we try to allocate from the preallocated pool, and then if that fails, we try allocating decreasing sizes from the kernel page allocator, down to the minimum size allowed (256kB). Note that the kernel page allocator limits allocations to 1 << CONFIG_FORCE_MAX_ZONEORDER pages, which by default corresponds to 16MB (on 64-bit powerpc, at least). Signed-off-by: Paul Mackerras [agraf: fix module compilation] Signed-off-by: Alexander Graf --- Documentation/virtual/kvm/api.txt | 36 +++++++ arch/powerpc/include/asm/kvm_book3s_64.h | 7 +- arch/powerpc/include/asm/kvm_host.h | 4 + arch/powerpc/include/asm/kvm_ppc.h | 3 +- arch/powerpc/kvm/book3s_64_mmu_hv.c | 123 +++++++++++++++++------ arch/powerpc/kvm/book3s_hv.c | 40 +++++--- arch/powerpc/kvm/book3s_hv_builtin.c | 5 +- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 15 +-- arch/powerpc/kvm/powerpc.c | 18 ++++ include/linux/kvm.h | 3 + 10 files changed, 200 insertions(+), 54 deletions(-) diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 930126698a0f..310fe508d9cd 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -1930,6 +1930,42 @@ The "pte_enc" field provides a value that can OR'ed into the hash PTE's RPN field (ie, it needs to be shifted left by 12 to OR it into the hash PTE second double word). + +4.75 KVM_PPC_ALLOCATE_HTAB + +Capability: KVM_CAP_PPC_ALLOC_HTAB +Architectures: powerpc +Type: vm ioctl +Parameters: Pointer to u32 containing hash table order (in/out) +Returns: 0 on success, -1 on error + +This requests the host kernel to allocate an MMU hash table for a +guest using the PAPR paravirtualization interface. This only does +anything if the kernel is configured to use the Book 3S HV style of +virtualization. 
Otherwise the capability doesn't exist and the ioctl +returns an ENOTTY error. The rest of this description assumes Book 3S +HV. + +There must be no vcpus running when this ioctl is called; if there +are, it will do nothing and return an EBUSY error. + +The parameter is a pointer to a 32-bit unsigned integer variable +containing the order (log base 2) of the desired size of the hash +table, which must be between 18 and 46. On successful return from the +ioctl, it will have been updated with the order of the hash table that +was allocated. + +If no hash table has been allocated when any vcpu is asked to run +(with the KVM_RUN ioctl), the host kernel will allocate a +default-sized hash table (16 MB). + +If this ioctl is called when a hash table has already been allocated, +the kernel will clear out the existing hash table (zero all HPTEs) and +return the hash table order in the parameter. (If the guest is using +the virtualized real-mode area (VRMA) facility, the kernel will +re-create the VMRA HPTEs on the next KVM_RUN of any vcpu.) + + 5. The kvm_run structure ------------------------ diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index b0c08b142770..0dd1d86d3e31 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -36,11 +36,8 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu) #define SPAPR_TCE_SHIFT 12 #ifdef CONFIG_KVM_BOOK3S_64_HV -/* For now use fixed-size 16MB page table */ -#define HPT_ORDER 24 -#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */ -#define HPT_NPTE (HPT_NPTEG << 3) /* 8 PTEs per PTEG */ -#define HPT_HASH_MASK (HPT_NPTEG - 1) +#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */ +extern int kvm_hpt_order; /* order of preallocated HPTs */ #endif #define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index d848cdc49715..dd783beb88b3 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -237,6 +237,10 @@ struct kvm_arch { unsigned long vrma_slb_v; int rma_setup_done; int using_mmu_notifiers; + u32 hpt_order; + atomic_t vcpus_running; + unsigned long hpt_npte; + unsigned long hpt_mask; spinlock_t slot_phys_lock; unsigned long *slot_phys[KVM_MEM_SLOTS_NUM]; int slot_npages[KVM_MEM_SLOTS_NUM]; diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index f68c22fa2fce..0124937a23b9 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -119,7 +119,8 @@ extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu); extern void kvmppc_map_magic(struct kvm_vcpu *vcpu); -extern long kvmppc_alloc_hpt(struct kvm *kvm); +extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp); +extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp); extern void kvmppc_free_hpt(struct kvm *kvm); extern long kvmppc_prepare_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 80a577517584..d03eb6f7b058 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -37,56 +37,121 @@ /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */ #define MAX_LPID_970 63 -long kvmppc_alloc_hpt(struct kvm *kvm) +/* Power architecture requires HPT is at least 256kB */ +#define 
PPC_MIN_HPT_ORDER 18 + +long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp) { unsigned long hpt; - long lpid; struct revmap_entry *rev; struct kvmppc_linear_info *li; + long order = kvm_hpt_order; - /* Allocate guest's hashed page table */ - li = kvm_alloc_hpt(); - if (li) { - /* using preallocated memory */ - hpt = (ulong)li->base_virt; - kvm->arch.hpt_li = li; - } else { - /* using dynamic memory */ + if (htab_orderp) { + order = *htab_orderp; + if (order < PPC_MIN_HPT_ORDER) + order = PPC_MIN_HPT_ORDER; + } + + /* + * If the user wants a different size from default, + * try first to allocate it from the kernel page allocator. + */ + hpt = 0; + if (order != kvm_hpt_order) { hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT| - __GFP_NOWARN, HPT_ORDER - PAGE_SHIFT); + __GFP_NOWARN, order - PAGE_SHIFT); + if (!hpt) + --order; } + /* Next try to allocate from the preallocated pool */ if (!hpt) { - pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n"); - return -ENOMEM; + li = kvm_alloc_hpt(); + if (li) { + hpt = (ulong)li->base_virt; + kvm->arch.hpt_li = li; + order = kvm_hpt_order; + } } + + /* Lastly try successively smaller sizes from the page allocator */ + while (!hpt && order > PPC_MIN_HPT_ORDER) { + hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT| + __GFP_NOWARN, order - PAGE_SHIFT); + if (!hpt) + --order; + } + + if (!hpt) + return -ENOMEM; + kvm->arch.hpt_virt = hpt; + kvm->arch.hpt_order = order; + /* HPTEs are 2**4 bytes long */ + kvm->arch.hpt_npte = 1ul << (order - 4); + /* 128 (2**7) bytes in each HPTEG */ + kvm->arch.hpt_mask = (1ul << (order - 7)) - 1; /* Allocate reverse map array */ - rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE); + rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte); if (!rev) { pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n"); goto out_freehpt; } kvm->arch.revmap = rev; + kvm->arch.sdr1 = __pa(hpt) | (order - 18); - lpid = kvmppc_alloc_lpid(); - if (lpid < 0) - goto out_freeboth; + pr_info("KVM guest htab at %lx (order %ld), LPID %x\n", + hpt, order, kvm->arch.lpid); - kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18); - kvm->arch.lpid = lpid; - - pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid); + if (htab_orderp) + *htab_orderp = order; return 0; - out_freeboth: - vfree(rev); out_freehpt: - free_pages(hpt, HPT_ORDER - PAGE_SHIFT); + if (kvm->arch.hpt_li) + kvm_release_hpt(kvm->arch.hpt_li); + else + free_pages(hpt, order - PAGE_SHIFT); return -ENOMEM; } +long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp) +{ + long err = -EBUSY; + long order; + + mutex_lock(&kvm->lock); + if (kvm->arch.rma_setup_done) { + kvm->arch.rma_setup_done = 0; + /* order rma_setup_done vs. vcpus_running */ + smp_mb(); + if (atomic_read(&kvm->arch.vcpus_running)) { + kvm->arch.rma_setup_done = 1; + goto out; + } + } + if (kvm->arch.hpt_virt) { + order = kvm->arch.hpt_order; + /* Set the entire HPT to 0, i.e. invalid HPTEs */ + memset((void *)kvm->arch.hpt_virt, 0, 1ul << order); + /* + * Set the whole last_vcpu array to an invalid vcpu number. + * This ensures that each vcpu will flush its TLB on next entry. 
+ */ + memset(kvm->arch.last_vcpu, 0xff, sizeof(kvm->arch.last_vcpu)); + *htab_orderp = order; + err = 0; + } else { + err = kvmppc_alloc_hpt(kvm, htab_orderp); + order = *htab_orderp; + } + out: + mutex_unlock(&kvm->lock); + return err; +} + void kvmppc_free_hpt(struct kvm *kvm) { kvmppc_free_lpid(kvm->arch.lpid); @@ -94,7 +159,8 @@ void kvmppc_free_hpt(struct kvm *kvm) if (kvm->arch.hpt_li) kvm_release_hpt(kvm->arch.hpt_li); else - free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT); + free_pages(kvm->arch.hpt_virt, + kvm->arch.hpt_order - PAGE_SHIFT); } /* Bits in first HPTE dword for pagesize 4k, 64k or 16M */ @@ -119,6 +185,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, unsigned long psize; unsigned long hp0, hp1; long ret; + struct kvm *kvm = vcpu->kvm; psize = 1ul << porder; npages = memslot->npages >> (porder - PAGE_SHIFT); @@ -127,8 +194,8 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, if (npages > 1ul << (40 - porder)) npages = 1ul << (40 - porder); /* Can't use more than 1 HPTE per HPTEG */ - if (npages > HPT_NPTEG) - npages = HPT_NPTEG; + if (npages > kvm->arch.hpt_mask + 1) + npages = kvm->arch.hpt_mask + 1; hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) | HPTE_V_BOLTED | hpte0_pgsize_encoding(psize); @@ -138,7 +205,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, for (i = 0; i < npages; ++i) { addr = i << porder; /* can't use hpt_hash since va > 64 bits */ - hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK; + hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask; /* * We assume that the hash table is empty and no * vcpus are using it at this stage. Since we create diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index c6af1d623839..d084e412b3c5 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -56,7 +56,7 @@ /* #define EXIT_DEBUG_INT */ static void kvmppc_end_cede(struct kvm_vcpu *vcpu); -static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu); +static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { @@ -1068,11 +1068,15 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) return -EINTR; } - /* On the first time here, set up VRMA or RMA */ + atomic_inc(&vcpu->kvm->arch.vcpus_running); + /* Order vcpus_running vs. 
rma_setup_done, see kvmppc_alloc_reset_hpt */ + smp_mb(); + + /* On the first time here, set up HTAB and VRMA or RMA */ if (!vcpu->kvm->arch.rma_setup_done) { - r = kvmppc_hv_setup_rma(vcpu); + r = kvmppc_hv_setup_htab_rma(vcpu); if (r) - return r; + goto out; } flush_fp_to_thread(current); @@ -1090,6 +1094,9 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) kvmppc_core_prepare_to_enter(vcpu); } } while (r == RESUME_GUEST); + + out: + atomic_dec(&vcpu->kvm->arch.vcpus_running); return r; } @@ -1305,7 +1312,7 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm, { } -static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu) +static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) { int err = 0; struct kvm *kvm = vcpu->kvm; @@ -1324,6 +1331,15 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu) if (kvm->arch.rma_setup_done) goto out; /* another vcpu beat us to it */ + /* Allocate hashed page table (if not done already) and reset it */ + if (!kvm->arch.hpt_virt) { + err = kvmppc_alloc_hpt(kvm, NULL); + if (err) { + pr_err("KVM: Couldn't alloc HPT\n"); + goto out; + } + } + /* Look up the memslot for guest physical address 0 */ memslot = gfn_to_memslot(kvm, 0); @@ -1435,13 +1451,14 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu) int kvmppc_core_init_vm(struct kvm *kvm) { - long r; - unsigned long lpcr; + unsigned long lpcr, lpid; - /* Allocate hashed page table */ - r = kvmppc_alloc_hpt(kvm); - if (r) - return r; + /* Allocate the guest's logical partition ID */ + + lpid = kvmppc_alloc_lpid(); + if (lpid < 0) + return -ENOMEM; + kvm->arch.lpid = lpid; INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); @@ -1451,7 +1468,6 @@ int kvmppc_core_init_vm(struct kvm *kvm) if (cpu_has_feature(CPU_FTR_ARCH_201)) { /* PPC970; HID4 is effectively the LPCR */ - unsigned long lpid = kvm->arch.lpid; kvm->arch.host_lpid = 0; kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4); lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH)); diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index e1b60f56f2a1..fb4eac290fef 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -25,6 +25,9 @@ static void __init kvm_linear_init_one(ulong size, int count, int type); static struct kvmppc_linear_info *kvm_alloc_linear(int type); static void kvm_release_linear(struct kvmppc_linear_info *ri); +int kvm_hpt_order = KVM_DEFAULT_HPT_ORDER; +EXPORT_SYMBOL_GPL(kvm_hpt_order); + /*************** RMA *************/ /* @@ -209,7 +212,7 @@ static void kvm_release_linear(struct kvmppc_linear_info *ri) void __init kvm_linear_init(void) { /* HPT */ - kvm_linear_init_one(1 << HPT_ORDER, kvm_hpt_count, KVM_LINEAR_HPT); + kvm_linear_init_one(1 << kvm_hpt_order, kvm_hpt_count, KVM_LINEAR_HPT); /* RMA */ /* Only do this on PPC970 in HV mode */ diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index cec4daddbf31..5c70d19494f9 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -237,7 +237,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, /* Find and lock the HPTEG slot to use */ do_insert: - if (pte_index >= HPT_NPTE) + if (pte_index >= kvm->arch.hpt_npte) return H_PARAMETER; if (likely((flags & H_EXACT) == 0)) { pte_index &= ~7UL; @@ -352,7 +352,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long v, r, rb; struct revmap_entry *rev; - if (pte_index >= HPT_NPTE) + if (pte_index >= kvm->arch.hpt_npte) return 
H_PARAMETER; hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) @@ -419,7 +419,8 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) i = 4; break; } - if (req != 1 || flags == 3 || pte_index >= HPT_NPTE) { + if (req != 1 || flags == 3 || + pte_index >= kvm->arch.hpt_npte) { /* parameter error */ args[j] = ((0xa0 | flags) << 56) + pte_index; ret = H_PARAMETER; @@ -521,7 +522,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, struct revmap_entry *rev; unsigned long v, r, rb, mask, bits; - if (pte_index >= HPT_NPTE) + if (pte_index >= kvm->arch.hpt_npte) return H_PARAMETER; hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); @@ -583,7 +584,7 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags, int i, n = 1; struct revmap_entry *rev = NULL; - if (pte_index >= HPT_NPTE) + if (pte_index >= kvm->arch.hpt_npte) return H_PARAMETER; if (flags & H_READ_4) { pte_index &= ~3; @@ -678,7 +679,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, somask = (1UL << 28) - 1; vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT; } - hash = (vsid ^ ((eaddr & somask) >> pshift)) & HPT_HASH_MASK; + hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask; avpn = slb_v & ~(somask >> 16); /* also includes B */ avpn |= (eaddr & somask) >> 16; @@ -723,7 +724,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, if (val & HPTE_V_SECONDARY) break; val |= HPTE_V_SECONDARY; - hash = hash ^ HPT_HASH_MASK; + hash = hash ^ kvm->arch.hpt_mask; } return -1; } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 1493c8de947b..87f4dc886076 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -246,6 +246,7 @@ int kvm_dev_ioctl_check_extension(long ext) #endif #ifdef CONFIG_PPC_BOOK3S_64 case KVM_CAP_SPAPR_TCE: + case KVM_CAP_PPC_ALLOC_HTAB: r = 1; break; #endif /* CONFIG_PPC_BOOK3S_64 */ @@ -802,6 +803,23 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EFAULT; break; } + + case KVM_PPC_ALLOCATE_HTAB: { + struct kvm *kvm = filp->private_data; + u32 htab_order; + + r = -EFAULT; + if (get_user(htab_order, (u32 __user *)argp)) + break; + r = kvmppc_alloc_reset_hpt(kvm, &htab_order); + if (r) + break; + r = -EFAULT; + if (put_user(htab_order, (u32 __user *)argp)) + break; + r = 0; + break; + } #endif /* CONFIG_KVM_BOOK3S_64_HV */ #ifdef CONFIG_PPC_BOOK3S_64 diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 09f2b3aa2da7..2ce09aa7d3b3 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -617,6 +617,7 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_SIGNAL_MSI 77 #define KVM_CAP_PPC_GET_SMMU_INFO 78 #define KVM_CAP_S390_COW 79 +#define KVM_CAP_PPC_ALLOC_HTAB 80 #ifdef KVM_CAP_IRQ_ROUTING @@ -828,6 +829,8 @@ struct kvm_s390_ucas_mapping { #define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi) /* Available with KVM_CAP_PPC_GET_SMMU_INFO */ #define KVM_PPC_GET_SMMU_INFO _IOR(KVMIO, 0xa6, struct kvm_ppc_smmu_info) +/* Available with KVM_CAP_PPC_ALLOC_HTAB */ +#define KVM_PPC_ALLOCATE_HTAB _IOWR(KVMIO, 0xa7, __u32) /* * ioctls for vcpu fds From 21bd000abff7d587229dbbee6f8c17f3aad9f9d8 Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Sun, 20 May 2012 23:21:23 +0000 Subject: [PATCH 03/93] KVM: PPC: booke: Added DECAR support Added the decrementer auto-reload support. DECAR is readable on e500v2/e500mc and later cpus. 
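
As an illustrative aside (not part of the patch), a Book E guest would arm the auto-reload that this change emulates roughly as follows; the SPR helpers and TCR bit names are assumed to come from <asm/reg.h> / <asm/reg_booke.h> on a Book E configuration.

/*
 * Hedged guest-side sketch, not part of the patch: program DECAR, seed
 * DEC, then enable auto-reload (TCR[ARE]) and decrementer interrupts
 * (TCR[DIE]).
 */
#include <linux/types.h>
#include <asm/reg.h>

static void guest_arm_dec_autoreload(u32 period)
{
	mtspr(SPRN_DECAR, period);	/* reloaded into DEC on each underflow */
	mtspr(SPRN_DEC, period);	/* start the first period */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE | TCR_DIE);
}
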
Signed-off-by: Bharat Bhushan Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_host.h | 2 ++ arch/powerpc/kvm/booke.c | 5 +++++ arch/powerpc/kvm/booke_emulate.c | 3 +++ arch/powerpc/kvm/e500_emulate.c | 3 +++ 4 files changed, 13 insertions(+) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index dd783beb88b3..50ea12fd7bf5 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -418,7 +418,9 @@ struct kvm_vcpu_arch { ulong mcsrr1; ulong mcsr; u32 dec; +#ifdef CONFIG_BOOKE u32 decar; +#endif u32 tbl; u32 tbu; u32 tcr; diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 72f13f4a06e0..86681eec60b1 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -1267,6 +1267,11 @@ void kvmppc_decrementer_func(unsigned long data) { struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; + if (vcpu->arch.tcr & TCR_ARE) { + vcpu->arch.dec = vcpu->arch.decar; + kvmppc_emulate_dec(vcpu); + } + kvmppc_set_tsr_bits(vcpu, TSR_DIS); } diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index 6c76397f2af4..9eb9809eb13e 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c @@ -129,6 +129,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) kvmppc_set_tcr(vcpu, spr_val); break; + case SPRN_DECAR: + vcpu->arch.decar = spr_val; + break; /* * Note: SPRG4-7 are user-readable. * These values are loaded into the real SPRGs when resuming the diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 8b99e076dc81..e04b0ef55ce0 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -269,6 +269,9 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) *spr_val = vcpu->arch.shared->mas7_3 >> 32; break; #endif + case SPRN_DECAR: + *spr_val = vcpu->arch.decar; + break; case SPRN_TLB0CFG: *spr_val = vcpu->arch.tlbcfg[0]; break; From d35b1075afc0172caa44b57b56a2b9b57e06d32e Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Sun, 20 May 2012 23:21:53 +0000 Subject: [PATCH 04/93] KVM: PPC: Not optimizing MSR_CE and MSR_ME with paravirt. If there is pending critical or machine check interrupt then guest would like to capture it when guest enable MSR.CE and MSR_ME respectively. Also as mostly MSR_CE and MSR_ME are updated with rfi/rfci/rfmii which anyway traps so removing the the paravirt optimization for MSR.CE and MSR.ME. Signed-off-by: Bharat Bhushan Signed-off-by: Alexander Graf --- Documentation/virtual/kvm/ppc-pv.txt | 2 -- arch/powerpc/kernel/kvm_emul.S | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/Documentation/virtual/kvm/ppc-pv.txt b/Documentation/virtual/kvm/ppc-pv.txt index 6e7c37050930..4911cf95c67e 100644 --- a/Documentation/virtual/kvm/ppc-pv.txt +++ b/Documentation/virtual/kvm/ppc-pv.txt @@ -109,8 +109,6 @@ The following bits are safe to be set inside the guest: MSR_EE MSR_RI - MSR_CR - MSR_ME If any other bit changes in the MSR, please still use mtmsr(d). 
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S index 62ceb2ac82cf..e100ff324a85 100644 --- a/arch/powerpc/kernel/kvm_emul.S +++ b/arch/powerpc/kernel/kvm_emul.S @@ -122,7 +122,7 @@ kvm_emulate_mtmsrd_len: .long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4 -#define MSR_SAFE_BITS (MSR_EE | MSR_CE | MSR_ME | MSR_RI) +#define MSR_SAFE_BITS (MSR_EE | MSR_RI) #define MSR_CRITICAL_BITS ~MSR_SAFE_BITS .global kvm_emulate_mtmsr From 92eca8faad2d1b136c939bc122842dcdabd6ff46 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Sun, 20 May 2012 13:13:28 +0900 Subject: [PATCH 05/93] KVM: Separate out dirty_bitmap allocation code as kvm_kvzalloc() Will be used for lpage_info allocation later. Signed-off-by: Takuya Yoshikawa Signed-off-by: Avi Kivity --- virt/kvm/kvm_main.c | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 7e140683ff14..1148c96a4817 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -516,16 +516,32 @@ out_err_nodisable: return ERR_PTR(r); } +/* + * Avoid using vmalloc for a small buffer. + * Should not be used when the size is statically known. + */ +static void *kvm_kvzalloc(unsigned long size) +{ + if (size > PAGE_SIZE) + return vzalloc(size); + else + return kzalloc(size, GFP_KERNEL); +} + +static void kvm_kvfree(const void *addr) +{ + if (is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} + static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) { if (!memslot->dirty_bitmap) return; - if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE) - vfree(memslot->dirty_bitmap); - else - kfree(memslot->dirty_bitmap); - + kvm_kvfree(memslot->dirty_bitmap); memslot->dirty_bitmap = NULL; } @@ -617,11 +633,7 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) #ifndef CONFIG_S390 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); - if (dirty_bytes > PAGE_SIZE) - memslot->dirty_bitmap = vzalloc(dirty_bytes); - else - memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL); - + memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes); if (!memslot->dirty_bitmap) return -ENOMEM; From c1a7b32a14138f908df52d7c53b5ce3415ec6b50 Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Sun, 20 May 2012 13:15:07 +0900 Subject: [PATCH 06/93] KVM: Avoid wasting pages for small lpage_info arrays lpage_info is created for each large level even when the memory slot is not for RAM. This means that when we add one slot for a PCI device, we end up allocating at least KVM_NR_PAGE_SIZES - 1 pages by vmalloc(). To make things worse, there is an increasing number of devices which would result in more pages being wasted this way. This patch mitigates this problem by using kvm_kvzalloc(). 
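
For illustration (not part of the patch), the rule the conversion relies on is sketched below: a kvm_kvzalloc() buffer may be kmalloc- or vmalloc-backed depending on its size, so it must always be released with kvm_kvfree(). The function name and size argument here are hypothetical placeholders.

#include <linux/kvm_host.h>

/* Hedged sketch of the allocate/free pairing used for lpage_info. */
static int example_lpage_alloc(unsigned long nbytes)
{
	void *buf = kvm_kvzalloc(nbytes);	/* kzalloc() if <= PAGE_SIZE, else vzalloc() */

	if (!buf)
		return -ENOMEM;
	/* ... in real code the buffer becomes slot->arch.lpage_info[i] ... */
	kvm_kvfree(buf);			/* never a bare kfree()/vfree() */
	return 0;
}
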
Signed-off-by: Takuya Yoshikawa Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 4 ++-- include/linux/kvm_host.h | 3 +++ virt/kvm/kvm_main.c | 4 ++-- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index be6d54929fa7..f12a52408cda 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6304,7 +6304,7 @@ void kvm_arch_free_memslot(struct kvm_memory_slot *free, for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) { if (!dont || free->arch.lpage_info[i] != dont->arch.lpage_info[i]) { - vfree(free->arch.lpage_info[i]); + kvm_kvfree(free->arch.lpage_info[i]); free->arch.lpage_info[i] = NULL; } } @@ -6323,7 +6323,7 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) slot->base_gfn, level) + 1; slot->arch.lpage_info[i] = - vzalloc(lpages * sizeof(*slot->arch.lpage_info[i])); + kvm_kvzalloc(lpages * sizeof(*slot->arch.lpage_info[i])); if (!slot->arch.lpage_info[i]) goto out_free; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c4464356b35b..19b83f6efa49 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -535,6 +535,9 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); void kvm_free_physmem(struct kvm *kvm); +void *kvm_kvzalloc(unsigned long size); +void kvm_kvfree(const void *addr); + #ifndef __KVM_HAVE_ARCH_VM_ALLOC static inline struct kvm *kvm_arch_alloc_vm(void) { diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 1148c96a4817..02cb440f802d 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -520,7 +520,7 @@ out_err_nodisable: * Avoid using vmalloc for a small buffer. * Should not be used when the size is statically known. */ -static void *kvm_kvzalloc(unsigned long size) +void *kvm_kvzalloc(unsigned long size) { if (size > PAGE_SIZE) return vzalloc(size); @@ -528,7 +528,7 @@ static void *kvm_kvzalloc(unsigned long size) return kzalloc(size, GFP_KERNEL); } -static void kvm_kvfree(const void *addr) +void kvm_kvfree(const void *addr) { if (is_vmalloc_addr(addr)) vfree(addr); From aaf07bc291c828189ae5221b370905a89bbb859d Mon Sep 17 00:00:00 2001 From: Xudong Hao Date: Mon, 28 May 2012 19:33:34 +0800 Subject: [PATCH 07/93] KVM: VMX: Add EPT A/D bits definitions Signed-off-by: Haitao Shan Signed-off-by: Xudong Hao Signed-off-by: Avi Kivity --- arch/x86/include/asm/vmx.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 31f180c21ce9..de007c272730 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -404,6 +404,7 @@ enum vmcs_field { #define VMX_EPTP_WB_BIT (1ull << 14) #define VMX_EPT_2MB_PAGE_BIT (1ull << 16) #define VMX_EPT_1GB_PAGE_BIT (1ull << 17) +#define VMX_EPT_AD_BIT (1ull << 21) #define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24) #define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) #define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) @@ -415,11 +416,14 @@ enum vmcs_field { #define VMX_EPT_MAX_GAW 0x4 #define VMX_EPT_MT_EPTE_SHIFT 3 #define VMX_EPT_GAW_EPTP_SHIFT 3 +#define VMX_EPT_AD_ENABLE_BIT (1ull << 6) #define VMX_EPT_DEFAULT_MT 0x6ull #define VMX_EPT_READABLE_MASK 0x1ull #define VMX_EPT_WRITABLE_MASK 0x2ull #define VMX_EPT_EXECUTABLE_MASK 0x4ull #define VMX_EPT_IPAT_BIT (1ull << 6) +#define VMX_EPT_ACCESS_BIT (1ull << 8) +#define VMX_EPT_DIRTY_BIT (1ull << 9) #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul From 83c3a3312235220476d3c207f67bd17be6e17ff9 Mon Sep 17 00:00:00 2001 From: Xudong Hao Date: Mon, 28 May 2012 19:33:35 +0800 Subject: [PATCH 08/93] 
KVM: VMX: Add parameter to control A/D bits support, default is on Add kernel parameter to control A/D bits support, it's on by default. Signed-off-by: Haitao Shan Signed-off-by: Xudong Hao Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 32eb58866292..18590e003bd2 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -71,6 +71,9 @@ static bool __read_mostly enable_unrestricted_guest = 1; module_param_named(unrestricted_guest, enable_unrestricted_guest, bool, S_IRUGO); +static bool __read_mostly enable_ept_ad_bits = 1; +module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO); + static bool __read_mostly emulate_invalid_guest_state = 0; module_param(emulate_invalid_guest_state, bool, S_IRUGO); @@ -789,6 +792,11 @@ static inline bool cpu_has_vmx_ept_4levels(void) return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT; } +static inline bool cpu_has_vmx_ept_ad_bits(void) +{ + return vmx_capability.ept & VMX_EPT_AD_BIT; +} + static inline bool cpu_has_vmx_invept_individual_addr(void) { return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT; @@ -2645,8 +2653,12 @@ static __init int hardware_setup(void) !cpu_has_vmx_ept_4levels()) { enable_ept = 0; enable_unrestricted_guest = 0; + enable_ept_ad_bits = 0; } + if (!cpu_has_vmx_ept_ad_bits()) + enable_ept_ad_bits = 0; + if (!cpu_has_vmx_unrestricted_guest()) enable_unrestricted_guest = 0; From b38f99347871d7fc49e6367395dce0d757f6ab8d Mon Sep 17 00:00:00 2001 From: Xudong Hao Date: Mon, 28 May 2012 19:33:36 +0800 Subject: [PATCH 09/93] KVM: VMX: Enable EPT A/D bits if supported by turning on relevant bit in EPTP In EPT page structure entry, Enable EPT A/D bits if processor supported. Signed-off-by: Haitao Shan Signed-off-by: Xudong Hao Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 18590e003bd2..d392e5427ca0 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3039,6 +3039,8 @@ static u64 construct_eptp(unsigned long root_hpa) /* TODO write the value reading from MSR */ eptp = VMX_EPT_DEFAULT_MT | VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT; + if (enable_ept_ad_bits) + eptp |= VMX_EPT_AD_ENABLE_BIT; eptp |= (root_hpa & PAGE_MASK); return eptp; From 3f6d8c8a478dd1ab2a4944b0d65474df06ecd882 Mon Sep 17 00:00:00 2001 From: Xudong Hao Date: Tue, 22 May 2012 11:23:15 +0800 Subject: [PATCH 10/93] KVM: VMX: Use EPT Access bit in response to memory notifiers Signed-off-by: Haitao Shan Signed-off-by: Xudong Hao Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 14 ++++++++------ arch/x86/kvm/vmx.c | 6 ++++-- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index be3cea4407ff..d07e436b7a42 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1242,7 +1242,8 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, int young = 0; /* - * Emulate the accessed bit for EPT, by checking if this page has + * In case of absence of EPT Access and Dirty Bits supports, + * emulate the accessed bit for EPT, by checking if this page has * an EPT mapping, and clearing it if it does. On the next access, * a new EPT mapping will be established. 
* This has some overhead, but not as much as the cost of swapping @@ -1253,11 +1254,12 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, for (sptep = rmap_get_first(*rmapp, &iter); sptep; sptep = rmap_get_next(&iter)) { - BUG_ON(!(*sptep & PT_PRESENT_MASK)); + BUG_ON(!is_shadow_present_pte(*sptep)); - if (*sptep & PT_ACCESSED_MASK) { + if (*sptep & shadow_accessed_mask) { young = 1; - clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)sptep); + clear_bit((ffs(shadow_accessed_mask) - 1), + (unsigned long *)sptep); } } @@ -1281,9 +1283,9 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, for (sptep = rmap_get_first(*rmapp, &iter); sptep; sptep = rmap_get_next(&iter)) { - BUG_ON(!(*sptep & PT_PRESENT_MASK)); + BUG_ON(!is_shadow_present_pte(*sptep)); - if (*sptep & PT_ACCESSED_MASK) { + if (*sptep & shadow_accessed_mask) { young = 1; break; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index d392e5427ca0..396148ab089b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -7289,8 +7289,10 @@ static int __init vmx_init(void) vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); if (enable_ept) { - kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, - VMX_EPT_EXECUTABLE_MASK); + kvm_mmu_set_mask_ptes(0ull, + (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull, + (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull, + 0ull, VMX_EPT_EXECUTABLE_MASK); ept_set_mmio_spte_mask(); kvm_enable_tdp(); } else From a6bb7929677aacfce3f864c3cdacaa7d527945d5 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 17 May 2012 13:14:08 +0300 Subject: [PATCH 11/93] KVM: ia64: Mark ia64 KVM as BROKEN Practically all patches to ia64 KVM are build fixes; numerous warnings remain; the last patch from the maintainer was committed more than three years ago. It is clear that no one is using this thing. Mark as BROKEN to ensure people don't get hit by pointless build problems. Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/ia64/kvm/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig index 9806e55f91be..df5351e3eed7 100644 --- a/arch/ia64/kvm/Kconfig +++ b/arch/ia64/kvm/Kconfig @@ -19,6 +19,7 @@ if VIRTUALIZATION config KVM tristate "Kernel-based Virtual Machine (KVM) support" + depends on BROKEN depends on HAVE_KVM && MODULES && EXPERIMENTAL # for device assignment: depends on PCI From 1952639665e92481c34c34c3e2a71bf3e66ba362 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 4 Jun 2012 14:53:23 +0300 Subject: [PATCH 12/93] KVM: MMU: do not iterate over all VMs in mmu_shrink() mmu_shrink() needlessly iterates over all VMs even though it will not attempt to free mmu pages from more than one on them. Fix that and also check used mmu pages count outside of VM lock to skip inactive VMs faster. 
Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index d07e436b7a42..1ca7164a74f1 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3944,7 +3944,6 @@ static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm, static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc) { struct kvm *kvm; - struct kvm *kvm_freed = NULL; int nr_to_scan = sc->nr_to_scan; if (nr_to_scan == 0) @@ -3956,22 +3955,30 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc) int idx; LIST_HEAD(invalid_list); + /* + * n_used_mmu_pages is accessed without holding kvm->mmu_lock + * here. We may skip a VM instance errorneosly, but we do not + * want to shrink a VM that only started to populate its MMU + * anyway. + */ + if (kvm->arch.n_used_mmu_pages > 0) { + if (!nr_to_scan--) + break; + continue; + } + idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); - if (!kvm_freed && nr_to_scan > 0 && - kvm->arch.n_used_mmu_pages > 0) { - kvm_mmu_remove_some_alloc_mmu_pages(kvm, - &invalid_list); - kvm_freed = kvm; - } - nr_to_scan--; + kvm_mmu_remove_some_alloc_mmu_pages(kvm, &invalid_list); kvm_mmu_commit_zap_page(kvm, &invalid_list); + spin_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); + + list_move_tail(&kvm->vm_list, &vm_list); + break; } - if (kvm_freed) - list_move_tail(&kvm_freed->vm_list, &vm_list); raw_spin_unlock(&kvm_lock); From b246dd5df139501b974bd6b28f7815e53b3a792f Mon Sep 17 00:00:00 2001 From: Orit Wasserman Date: Thu, 31 May 2012 14:49:22 +0300 Subject: [PATCH 13/93] KVM: VMX: Fix KVM_SET_SREGS with big real mode segments For example migration between Westmere and Nehelem hosts, caught in big real mode. The code that fixes the segments for real mode guest was moved from enter_rmode to vmx_set_segments. enter_rmode calls vmx_set_segments for each segment. 
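
As an illustrative aside (not part of the patch), the failing path is a migration target restoring segment state while the guest is in big real mode. A userspace-side sketch of that restore is below; src_fd and dst_fd are hypothetical, already-created vcpu file descriptors, and in practice the two ioctls run on different hosts.

#include <err.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hedged userspace sketch: segment state captured on the source host is
 * replayed on the destination with KVM_SET_SREGS; with this patch the
 * kernel applies the real-mode fixups per segment in vmx_set_segment().
 */
static void migrate_sregs(int src_fd, int dst_fd)
{
	struct kvm_sregs sregs;

	if (ioctl(src_fd, KVM_GET_SREGS, &sregs) < 0)
		err(1, "KVM_GET_SREGS");
	/* ... transport sregs to the destination host ... */
	if (ioctl(dst_fd, KVM_SET_SREGS, &sregs) < 0)
		err(1, "KVM_SET_SREGS");
}
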
Signed-off-by: Orit Wasserman Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 70 ++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 58 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 396148ab089b..f78662ec8677 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -618,6 +618,10 @@ static void kvm_cpu_vmxon(u64 addr); static void kvm_cpu_vmxoff(void); static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr); +static void vmx_set_segment(struct kvm_vcpu *vcpu, + struct kvm_segment *var, int seg); +static void vmx_get_segment(struct kvm_vcpu *vcpu, + struct kvm_segment *var, int seg); static DEFINE_PER_CPU(struct vmcs *, vmxarea); static DEFINE_PER_CPU(struct vmcs *, current_vmcs); @@ -2782,6 +2786,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu) { unsigned long flags; struct vcpu_vmx *vmx = to_vmx(vcpu); + struct kvm_segment var; if (enable_unrestricted_guest) return; @@ -2825,20 +2830,23 @@ static void enter_rmode(struct kvm_vcpu *vcpu) if (emulate_invalid_guest_state) goto continue_rmode; - vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4); - vmcs_write32(GUEST_SS_LIMIT, 0xffff); - vmcs_write32(GUEST_SS_AR_BYTES, 0xf3); + vmx_get_segment(vcpu, &var, VCPU_SREG_SS); + vmx_set_segment(vcpu, &var, VCPU_SREG_SS); - vmcs_write32(GUEST_CS_AR_BYTES, 0xf3); - vmcs_write32(GUEST_CS_LIMIT, 0xffff); - if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000) - vmcs_writel(GUEST_CS_BASE, 0xf0000); - vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4); + vmx_get_segment(vcpu, &var, VCPU_SREG_CS); + vmx_set_segment(vcpu, &var, VCPU_SREG_CS); - fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es); - fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds); - fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs); - fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs); + vmx_get_segment(vcpu, &var, VCPU_SREG_ES); + vmx_set_segment(vcpu, &var, VCPU_SREG_ES); + + vmx_get_segment(vcpu, &var, VCPU_SREG_DS); + vmx_set_segment(vcpu, &var, VCPU_SREG_DS); + + vmx_get_segment(vcpu, &var, VCPU_SREG_GS); + vmx_set_segment(vcpu, &var, VCPU_SREG_GS); + + vmx_get_segment(vcpu, &var, VCPU_SREG_FS); + vmx_set_segment(vcpu, &var, VCPU_SREG_FS); continue_rmode: kvm_mmu_reset_context(vcpu); @@ -3243,6 +3251,44 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, vmcs_write32(sf->ar_bytes, ar); __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail); + + /* + * Fix segments for real mode guest in hosts that don't have + * "unrestricted_mode" or it was disabled. + * This is done to allow migration of the guests from hosts with + * unrestricted guest like Westmere to older host that don't have + * unrestricted guest like Nehelem. 
+ */ + if (!enable_unrestricted_guest && vmx->rmode.vm86_active) { + switch (seg) { + case VCPU_SREG_CS: + vmcs_write32(GUEST_CS_AR_BYTES, 0xf3); + vmcs_write32(GUEST_CS_LIMIT, 0xffff); + if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000) + vmcs_writel(GUEST_CS_BASE, 0xf0000); + vmcs_write16(GUEST_CS_SELECTOR, + vmcs_readl(GUEST_CS_BASE) >> 4); + break; + case VCPU_SREG_ES: + fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es); + break; + case VCPU_SREG_DS: + fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds); + break; + case VCPU_SREG_GS: + fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs); + break; + case VCPU_SREG_FS: + fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs); + break; + case VCPU_SREG_SS: + vmcs_write16(GUEST_SS_SELECTOR, + vmcs_readl(GUEST_SS_BASE) >> 4); + vmcs_write32(GUEST_SS_LIMIT, 0xffff); + vmcs_write32(GUEST_SS_AR_BYTES, 0xf3); + break; + } + } } static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) From 4ae57b6cfe67d12c9ba340cf172dc4e38a651ec5 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Tue, 5 Jun 2012 13:05:02 +0200 Subject: [PATCH 14/93] KVM: s390: Change maintainer Since Carsten is now working on a different project, Cornelia will work as the 2nd s390/kvm maintainer. Signed-off-by: Christian Borntraeger CC: Carsten Otte CC: Cornelia Huck Signed-off-by: Avi Kivity --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 6a52bb4a4fc7..4023d0b4e9db 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3987,8 +3987,8 @@ F: arch/ia64/include/asm/kvm* F: arch/ia64/kvm/ KERNEL VIRTUAL MACHINE for s390 (KVM/s390) -M: Carsten Otte M: Christian Borntraeger +M: Cornelia Huck M: linux390@de.ibm.com L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ From a737f256bf14adf94920aa70d150ab4dcd145109 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Sun, 3 Jun 2012 21:17:48 +0300 Subject: [PATCH 15/93] KVM: Cleanup the kvm_print functions and introduce pr_XX wrappers Introduces a couple of print functions, which are essentially wrappers around standard printk functions, with a KVM: prefix. Functions introduced or modified are: - kvm_err(fmt, ...) - kvm_info(fmt, ...) - kvm_debug(fmt, ...) - kvm_pr_unimpl(fmt, ...) - pr_unimpl(vcpu, fmt, ...) -> vcpu_unimpl(vcpu, fmt, ...) 
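
A brief usage sketch (not part of the patch; the real call-site conversions follow in the diff): each wrapper prefixes its message with a "kvm [<task id>]: " tag, vcpu_unimpl() additionally names the vcpu, and the unimpl variants are rate-limited. The arguments below are hypothetical.

#include <linux/kvm_host.h>

/* Hedged sketch showing the new logging helpers with placeholder values. */
static void print_wrapper_examples(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	kvm_info("example informational message\n");
	kvm_debug("example debug message\n");
	vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
}
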
Signed-off-by: Christoffer Dall Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 6 ++--- arch/x86/kvm/vmx.c | 2 +- arch/x86/kvm/x86.c | 54 ++++++++++++++++++++-------------------- include/linux/kvm_host.h | 18 +++++++++----- 4 files changed, 43 insertions(+), 37 deletions(-) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index f75af406b268..7a418783259d 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3185,8 +3185,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) break; case MSR_IA32_DEBUGCTLMSR: if (!boot_cpu_has(X86_FEATURE_LBRV)) { - pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n", - __func__, data); + vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n", + __func__, data); break; } if (data & DEBUGCTL_RESERVED_BITS) @@ -3205,7 +3205,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) case MSR_VM_CR: return svm_set_vm_cr(vcpu, data); case MSR_VM_IGNNE: - pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); + vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); break; default: return kvm_set_msr_common(vcpu, ecx, data); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f78662ec8677..eeeb4a25aed6 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4549,7 +4549,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) break; } vcpu->run->exit_reason = 0; - pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n", + vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n", (int)(exit_qualification >> 4) & 3, cr); return 0; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f12a52408cda..a01a4241bc6b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1437,8 +1437,8 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) break; } default: - pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " - "data 0x%llx\n", msr, data); + vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " + "data 0x%llx\n", msr, data); return 1; } return 0; @@ -1470,8 +1470,8 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); default: - pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " - "data 0x%llx\n", msr, data); + vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " + "data 0x%llx\n", msr, data); return 1; } @@ -1551,15 +1551,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) data &= ~(u64)0x100; /* ignore ignne emulation enable */ data &= ~(u64)0x8; /* ignore TLB cache disable */ if (data != 0) { - pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", - data); + vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", + data); return 1; } break; case MSR_FAM10H_MMIO_CONF_BASE: if (data != 0) { - pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " - "0x%llx\n", data); + vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " + "0x%llx\n", data); return 1; } break; @@ -1574,8 +1574,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) thus reserved and should throw a #GP */ return 1; } - pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", - __func__, data); + vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", + __func__, data); break; case MSR_IA32_UCODE_REV: case MSR_IA32_UCODE_WRITE: @@ -1671,8 +1671,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) case MSR_K7_EVNTSEL2: case MSR_K7_EVNTSEL3: if (data != 0) - pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " - "0x%x data 
0x%llx\n", msr, data); + vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " + "0x%x data 0x%llx\n", msr, data); break; /* at least RHEL 4 unconditionally writes to the perfctr registers, * so we ignore writes to make it happy. @@ -1681,8 +1681,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) case MSR_K7_PERFCTR1: case MSR_K7_PERFCTR2: case MSR_K7_PERFCTR3: - pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " - "0x%x data 0x%llx\n", msr, data); + vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " + "0x%x data 0x%llx\n", msr, data); break; case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: @@ -1693,8 +1693,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) return kvm_pmu_set_msr(vcpu, msr, data); if (pr || data != 0) - pr_unimpl(vcpu, "disabled perfctr wrmsr: " - "0x%x data 0x%llx\n", msr, data); + vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " + "0x%x data 0x%llx\n", msr, data); break; case MSR_K7_CLK_CTL: /* @@ -1720,7 +1720,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) /* Drop writes to this legacy MSR -- see rdmsr * counterpart for further detail. */ - pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); + vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); break; case MSR_AMD64_OSVW_ID_LENGTH: if (!guest_cpuid_has_osvw(vcpu)) @@ -1738,12 +1738,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr, data); if (!ignore_msrs) { - pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", - msr, data); + vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", + msr, data); return 1; } else { - pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", - msr, data); + vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", + msr, data); break; } } @@ -1846,7 +1846,7 @@ static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) data = kvm->arch.hv_hypercall; break; default: - pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); + vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } @@ -1877,7 +1877,7 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) data = vcpu->arch.hv_vapic; break; default: - pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); + vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; @@ -2030,10 +2030,10 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_get_msr(vcpu, msr, pdata); if (!ignore_msrs) { - pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); + vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); return 1; } else { - pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr); + vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr); data = 0; } break; @@ -4116,7 +4116,7 @@ static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) value = kvm_get_cr8(vcpu); break; default: - vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr); + kvm_err("%s: unexpected cr %u\n", __func__, cr); return 0; } @@ -4145,7 +4145,7 @@ static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) res = kvm_set_cr8(vcpu, val); break; default: - vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr); + kvm_err("%s: unexpected cr %u\n", __func__, cr); res = -1; } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 19b83f6efa49..27ac8a4767fa 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -314,13 +314,19 @@ struct kvm { long tlbs_dirty; }; -/* The 
guest did something we don't support. */ -#define pr_unimpl(vcpu, fmt, ...) \ - pr_err_ratelimited("kvm: %i: cpu%i " fmt, \ - current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__) +#define kvm_err(fmt, ...) \ + pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) +#define kvm_info(fmt, ...) \ + pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) +#define kvm_debug(fmt, ...) \ + pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) +#define kvm_pr_unimpl(fmt, ...) \ + pr_err_ratelimited("kvm [%i]: " fmt, \ + task_tgid_nr(current), ## __VA_ARGS__) -#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt) -#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt) +/* The guest did something we don't support. */ +#define vcpu_unimpl(vcpu, fmt, ...) \ + kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) { From 79f702a6d18c75760c68202007265b2310d6f44e Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 3 Jun 2012 11:34:08 +0300 Subject: [PATCH 16/93] KVM: disable uninitialized var warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I see this in 3.5-rc1: arch/x86/kvm/mmu.c: In function ‘kvm_test_age_rmapp’: arch/x86/kvm/mmu.c:1271: warning: ‘iter.desc’ may be used uninitialized in this function The line in question was introduced by commit 1e3f42f03c38c29c1814199a6f0a2f01b919ea3f static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, unsigned long data) { - u64 *spte; + u64 *sptep; + struct rmap_iterator iter; <- line 1271 int young = 0; /* The reason I think is that the compiler assumes that the rmap value could be 0, so static u64 *rmap_get_first(unsigned long rmap, struct rmap_iterator *iter) { if (!rmap) return NULL; if (!(rmap & 1)) { iter->desc = NULL; return (u64 *)rmap; } iter->desc = (struct pte_list_desc *)(rmap & ~1ul); iter->pos = 0; return iter->desc->sptes[iter->pos]; } will not initialize iter.desc, but the compiler isn't smart enough to see that for (sptep = rmap_get_first(*rmapp, &iter); sptep; sptep = rmap_get_next(&iter)) { will immediately exit in this case. I checked by adding if (!*rmapp) goto out; on top which is clearly equivalent but disables the warning. This patch uses uninitialized_var to disable the warning without increasing code size. Signed-off-by: Michael S. Tsirkin Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 1ca7164a74f1..24dd43d45ae4 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1238,7 +1238,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, unsigned long data) { u64 *sptep; - struct rmap_iterator iter; + struct rmap_iterator uninitialized_var(iter); int young = 0; /* From 80feb89a0a1381642f1cce9036ef3bb22f13b40a Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Tue, 29 May 2012 23:54:26 +0900 Subject: [PATCH 17/93] KVM: MMU: Remove unused parameter from mmu_memory_cache_alloc() Size is not needed to return one from pre-allocated objects. 
Signed-off-by: Takuya Yoshikawa Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/mmu.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 24dd43d45ae4..b32a11dc884c 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -652,8 +652,7 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) mmu_page_header_cache); } -static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc, - size_t size) +static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) { void *p; @@ -664,8 +663,7 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc, static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu) { - return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache, - sizeof(struct pte_list_desc)); + return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache); } static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc) @@ -1403,12 +1401,10 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, u64 *parent_pte, int direct) { struct kvm_mmu_page *sp; - sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, - sizeof *sp); - sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE); + sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache); + sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); if (!direct) - sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, - PAGE_SIZE); + sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); set_page_private(virt_to_page(sp->spt), (unsigned long)sp); list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); bitmap_zero(sp->slot_bitmap, KVM_MEM_SLOTS_NUM); From 00763e41138267a307531397f9745835aecb8c7b Mon Sep 17 00:00:00 2001 From: Xudong Hao Date: Thu, 7 Jun 2012 18:26:07 +0800 Subject: [PATCH 18/93] KVM: x86: change PT_FIRST_AVAIL_BITS_SHIFT to avoid conflict with EPT Dirty bit EPT Dirty bit use bit 9 as Intel SDM definition, to avoid conflict, change PT_FIRST_AVAIL_BITS_SHIFT to 10. Signed-off-by: Xudong Hao Signed-off-by: Xiantao Zhang Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index b32a11dc884c..3b53d9e08bfc 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -90,7 +90,7 @@ module_param(dbg, bool, 0644); #define PTE_PREFETCH_NUM 8 -#define PT_FIRST_AVAIL_BITS_SHIFT 9 +#define PT_FIRST_AVAIL_BITS_SHIFT 10 #define PT64_SECOND_AVAIL_BITS_SHIFT 52 #define PT64_LEVEL_BITS 9 From 61bde82caee95426bf1ad53fefc8dc691b8ba37c Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Mon, 11 Jun 2012 16:06:57 +0200 Subject: [PATCH 19/93] KVM: s390: Set CPU in stopped state on initial cpu reset The initial cpu reset sets the cpu in the stopped state. Several places check for the cpu state (e.g. sigp set prefix) and not setting the STOPPED state triggered errors with newer guest kernels after reboot. 
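As a hedged sketch of the kind of check that trips over this (names follow the s390 KVM SIGP code quoted later in this series), a handler such as SIGP SET PREFIX only acts on a target it believes is stopped:

	/* Sketch only: without CPUSTAT_STOPPED set at initial reset, this test
	 * fails even though the freshly reset cpu really is stopped. */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED))
		return 1;	/* status stored: incorrect state, request rejected */
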
Signed-off-by: Christian Borntraeger Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti --- arch/s390/kvm/kvm-s390.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 664766d0c83c..ace93603d861 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -347,6 +347,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) vcpu->arch.guest_fpregs.fpc = 0; asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc)); vcpu->arch.sie_block->gbea = 1; + atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) From cd1834591fe9564720ac4b0193bf1c790fe89f0d Mon Sep 17 00:00:00 2001 From: Heinz Graalfs Date: Mon, 11 Jun 2012 16:06:59 +0200 Subject: [PATCH 20/93] KVM: s390: Perform early event mask processing during boot For processing under KVM it is required to detect the actual SCLP console type in order to set it as preferred console. Signed-off-by: Heinz Graalfs Acked-by: Heiko Carstens Acked-by: Peter Oberparleiter Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti --- arch/s390/include/asm/sclp.h | 2 ++ arch/s390/kernel/setup.c | 12 ++++++++--- drivers/s390/char/sclp.c | 10 --------- drivers/s390/char/sclp.h | 10 +++++++++ drivers/s390/char/sclp_cmd.c | 38 +++++++++++++++++++++++++++++++++++ drivers/s390/kvm/kvm_virtio.c | 3 ++- 6 files changed, 61 insertions(+), 14 deletions(-) diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index bf238c55740b..173f07aaeb2b 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -55,5 +55,7 @@ int sclp_chp_configure(struct chp_id chpid); int sclp_chp_deconfigure(struct chp_id chpid); int sclp_chp_read_info(struct sclp_chp_info *info); void sclp_get_ipl_info(struct sclp_ipl_info *info); +bool sclp_has_linemode(void); +bool sclp_has_vt220(void); #endif /* _ASM_S390_SCLP_H */ diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 489d1d8d96b0..e86bca6f975c 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -63,6 +63,7 @@ #include #include #include +#include #include "entry.h" long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY | @@ -138,9 +139,14 @@ __setup("condev=", condev_setup); static void __init set_preferred_console(void) { - if (MACHINE_IS_KVM) - add_preferred_console("hvc", 0, NULL); - else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP) + if (MACHINE_IS_KVM) { + if (sclp_has_vt220()) + add_preferred_console("ttyS", 1, NULL); + else if (sclp_has_linemode()) + add_preferred_console("ttyS", 0, NULL); + else + add_preferred_console("hvc", 0, NULL); + } else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP) add_preferred_console("ttyS", 0, NULL); else if (CONSOLE_IS_3270) add_preferred_console("tty3270", 0, NULL); diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 30f29a0020a1..3fcc000efc53 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -654,16 +654,6 @@ sclp_remove_processed(struct sccb_header *sccb) EXPORT_SYMBOL(sclp_remove_processed); -struct init_sccb { - struct sccb_header header; - u16 _reserved; - u16 mask_length; - sccb_mask_t receive_mask; - sccb_mask_t send_mask; - sccb_mask_t sclp_receive_mask; - sccb_mask_t sclp_send_mask; -} __attribute__((packed)); - /* Prepare init mask request. Called while sclp_lock is locked. 
*/ static inline void __sclp_make_init_req(u32 receive_mask, u32 send_mask) diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 49a1bb52bc87..d7e97ae9ef6d 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -88,6 +88,16 @@ struct sccb_header { u16 response_code; } __attribute__((packed)); +struct init_sccb { + struct sccb_header header; + u16 _reserved; + u16 mask_length; + sccb_mask_t receive_mask; + sccb_mask_t send_mask; + sccb_mask_t sclp_receive_mask; + sccb_mask_t sclp_send_mask; +} __attribute__((packed)); + extern u64 sclp_facilities; #define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL) #define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 766cb7b19b40..71ea923c322d 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -48,6 +48,7 @@ struct read_info_sccb { u8 _reserved5[4096 - 112]; /* 112-4095 */ } __attribute__((packed, aligned(PAGE_SIZE))); +static struct init_sccb __initdata early_event_mask_sccb __aligned(PAGE_SIZE); static struct read_info_sccb __initdata early_read_info_sccb; static int __initdata early_read_info_sccb_valid; @@ -104,6 +105,19 @@ static void __init sclp_read_info_early(void) } } +static void __init sclp_event_mask_early(void) +{ + struct init_sccb *sccb = &early_event_mask_sccb; + int rc; + + do { + memset(sccb, 0, sizeof(*sccb)); + sccb->header.length = sizeof(*sccb); + sccb->mask_length = sizeof(sccb_mask_t); + rc = sclp_cmd_sync_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb); + } while (rc == -EBUSY); +} + void __init sclp_facilities_detect(void) { struct read_info_sccb *sccb; @@ -119,6 +133,30 @@ void __init sclp_facilities_detect(void) rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; rzm <<= 20; + + sclp_event_mask_early(); +} + +bool __init sclp_has_linemode(void) +{ + struct init_sccb *sccb = &early_event_mask_sccb; + + if (sccb->header.response_code != 0x20) + return 0; + if (sccb->sclp_send_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)) + return 1; + return 0; +} + +bool __init sclp_has_vt220(void) +{ + struct init_sccb *sccb = &early_event_mask_sccb; + + if (sccb->header.response_code != 0x20) + return 0; + if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK) + return 1; + return 0; } unsigned long long sclp_get_rnmax(void) diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index d74e9ae6dfb3..d3bdae49512f 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -468,7 +469,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) static int __init s390_virtio_console_init(void) { - if (!MACHINE_IS_KVM) + if (sclp_has_vt220() || sclp_has_linemode()) return -ENODEV; return virtio_cons_early_init(early_put_chars); } From dcce0489477f07ac1331aee71f18d6274e19a9c1 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Mon, 11 Jun 2012 18:39:50 +0200 Subject: [PATCH 21/93] KVM: trace events: update list of exit reasons The list of exit reasons for the kvm_userspace_exit event was missing recent additions; bring it into sync again. 
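The practical effect is on pretty-printing: this list feeds __print_symbolic(), so an exit reason missing from it is traced as a bare number rather than its KVM_EXIT_* name. From memory, the helper macro in the same header has roughly this shape:

	/* Assumed definition of the helper the list relies on (quoted from memory): */
	#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
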
Reviewed-by: Christian Borntraeger Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti --- include/trace/events/kvm.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 46e3cd8e197a..3df5925fe641 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -13,7 +13,8 @@ ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \ ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \ ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\ - ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI) + ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL), \ + ERSN(S390_UCONTROL) TRACE_EVENT(kvm_userspace_exit, TP_PROTO(__u32 reason, int errno), From 9900b4b48b095895cf962054e45aafa49ef70f74 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 15 Jun 2012 15:07:02 -0400 Subject: [PATCH 22/93] KVM: use KVM_CAP_IRQ_ROUTING to protect the routing related code The KVM code sometimes uses CONFIG_HAVE_KVM_IRQCHIP to protect code that is related to IRQ routing, which not all in-kernel irqchips may support. Use KVM_CAP_IRQ_ROUTING instead. Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Avi Kivity --- include/linux/kvm_host.h | 2 +- virt/kvm/kvm_main.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 27ac8a4767fa..c7f77876c9b3 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -802,7 +802,7 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se } #endif -#ifdef CONFIG_HAVE_KVM_IRQCHIP +#ifdef KVM_CAP_IRQ_ROUTING #define KVM_MAX_IRQ_ROUTES 1024 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 02cb440f802d..636bd08bb399 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2225,7 +2225,7 @@ static long kvm_dev_ioctl_check_extension_generic(long arg) case KVM_CAP_SIGNAL_MSI: #endif return 1; -#ifdef CONFIG_HAVE_KVM_IRQCHIP +#ifdef KVM_CAP_IRQ_ROUTING case KVM_CAP_IRQ_ROUTING: return KVM_MAX_IRQ_ROUTES; #endif From a1e4ccb990447df0fe83d164d9a7bc2e6c4b7db7 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Fri, 15 Jun 2012 15:07:13 -0400 Subject: [PATCH 23/93] KVM: Introduce __KVM_HAVE_IRQ_LINE This is a preparatory patch for the KVM/ARM implementation. KVM/ARM will use the KVM_IRQ_LINE ioctl, which is currently conditional on __KVM_HAVE_IOAPIC, but ARM obviously doesn't have any IOAPIC support and we need a separate define. Signed-off-by: Christoffer Dall Signed-off-by: Avi Kivity --- arch/ia64/include/asm/kvm.h | 1 + arch/x86/include/asm/kvm.h | 1 + include/trace/events/kvm.h | 4 +++- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h index b9f82c84f093..ec6c6b301238 100644 --- a/arch/ia64/include/asm/kvm.h +++ b/arch/ia64/include/asm/kvm.h @@ -26,6 +26,7 @@ /* Select x86 specific features in */ #define __KVM_HAVE_IOAPIC +#define __KVM_HAVE_IRQ_LINE #define __KVM_HAVE_DEVICE_ASSIGNMENT /* Architectural interrupt line count. 
*/ diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index e7d1c194d272..246617efd67f 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h @@ -12,6 +12,7 @@ /* Select x86 specific features in */ #define __KVM_HAVE_PIT #define __KVM_HAVE_IOAPIC +#define __KVM_HAVE_IRQ_LINE #define __KVM_HAVE_DEVICE_ASSIGNMENT #define __KVM_HAVE_MSI #define __KVM_HAVE_USER_NMI diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 3df5925fe641..7ef9e759f499 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -37,7 +37,7 @@ TRACE_EVENT(kvm_userspace_exit, __entry->errno < 0 ? -__entry->errno : __entry->reason) ); -#if defined(__KVM_HAVE_IOAPIC) +#if defined(__KVM_HAVE_IRQ_LINE) TRACE_EVENT(kvm_set_irq, TP_PROTO(unsigned int gsi, int level, int irq_source_id), TP_ARGS(gsi, level, irq_source_id), @@ -57,7 +57,9 @@ TRACE_EVENT(kvm_set_irq, TP_printk("gsi %u level %d source %d", __entry->gsi, __entry->level, __entry->irq_source_id) ); +#endif +#if defined(__KVM_HAVE_IOAPIC) #define kvm_deliver_mode \ {0x0, "Fixed"}, \ {0x1, "LowPrio"}, \ From 9e40b67bf2bfaa40b28354c501a72fd001a1397a Mon Sep 17 00:00:00 2001 From: Takuya Yoshikawa Date: Tue, 19 Jun 2012 22:04:56 +0900 Subject: [PATCH 24/93] KVM: Use kvm_kvfree() to free memory allocated by kvm_kvzalloc() The following commit did not care about the error handling path: commit c1a7b32a14138f908df52d7c53b5ce3415ec6b50 KVM: Avoid wasting pages for small lpage_info arrays If memory allocation fails, vfree() will be called with the address returned by kzalloc(). This patch fixes this issue. Signed-off-by: Takuya Yoshikawa Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a01a4241bc6b..6ed5983f78ff 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6350,7 +6350,7 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) out_free: for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) { - vfree(slot->arch.lpage_info[i]); + kvm_kvfree(slot->arch.lpage_info[i]); slot->arch.lpage_info[i] = NULL; } return -ENOMEM; From 5eadf916dfa04c3c51397dbcb803ce8735bf191a Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 24 Jun 2012 19:24:19 +0300 Subject: [PATCH 25/93] KVM: document lapic regs field Signed-off-by: Michael S. Tsirkin Signed-off-by: Avi Kivity --- arch/x86/kvm/lapic.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 6f4ce2575d09..d29da25ea525 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -13,6 +13,11 @@ struct kvm_lapic { u32 divide_count; struct kvm_vcpu *vcpu; bool irr_pending; + /** + * APIC register page. The layout matches the register layout seen by + * the guest 1:1, because it is accessed by the vmx microcode. + * Note: Only one register, the TPR, is used by the microcode. + */ void *regs; gpa_t vapic_addr; struct page *vapic_page; From 8680b94b0e6046af2644c17313287ec0cb5843dc Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 24 Jun 2012 19:24:26 +0300 Subject: [PATCH 26/93] KVM: optimize ISR lookups We perform ISR lookups twice: during interrupt injection and on EOI. Typical workloads only have a single bit set there. So we can avoid ISR scans by 1. counting bits as we set/clear them in ISR 2. on set, caching the injected vector number 3. 
on clear, invalidating the cache The real purpose of this is enabling PV EOI which needs to quickly validate the vector. But non PV guests also benefit: with this patch, and without interrupt nesting, apic_find_highest_isr will always return immediately without scanning ISR. Signed-off-by: Michael S. Tsirkin Signed-off-by: Avi Kivity --- arch/x86/kvm/lapic.c | 53 ++++++++++++++++++++++++++++++++++++++++++-- arch/x86/kvm/lapic.h | 4 ++++ 2 files changed, 55 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 93c15743f1ee..805d887784f6 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -107,6 +107,16 @@ static inline void apic_clear_vector(int vec, void *bitmap) clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); } +static inline int __apic_test_and_set_vector(int vec, void *bitmap) +{ + return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); +} + +static inline int __apic_test_and_clear_vector(int vec, void *bitmap) +{ + return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); +} + static inline int apic_hw_enabled(struct kvm_lapic *apic) { return (apic)->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE; @@ -210,6 +220,16 @@ static int find_highest_vector(void *bitmap) return fls(word[word_offset << 2]) - 1 + (word_offset << 5); } +static u8 count_vectors(void *bitmap) +{ + u32 *word = bitmap; + int word_offset; + u8 count = 0; + for (word_offset = 0; word_offset < MAX_APIC_VECTOR >> 5; ++word_offset) + count += hweight32(word[word_offset << 2]); + return count; +} + static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic) { apic->irr_pending = true; @@ -242,6 +262,27 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic) apic->irr_pending = true; } +static inline void apic_set_isr(int vec, struct kvm_lapic *apic) +{ + if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR)) + ++apic->isr_count; + BUG_ON(apic->isr_count > MAX_APIC_VECTOR); + /* + * ISR (in service register) bit is set when injecting an interrupt. + * The highest vector is injected. Thus the latest bit set matches + * the highest bit in ISR. 
+ */ + apic->highest_isr_cache = vec; +} + +static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) +{ + if (__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR)) + --apic->isr_count; + BUG_ON(apic->isr_count < 0); + apic->highest_isr_cache = -1; +} + int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; @@ -273,6 +314,10 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq) static inline int apic_find_highest_isr(struct kvm_lapic *apic) { int result; + if (!apic->isr_count) + return -1; + if (likely(apic->highest_isr_cache != -1)) + return apic->highest_isr_cache; result = find_highest_vector(apic->regs + APIC_ISR); ASSERT(result == -1 || result >= 16); @@ -492,7 +537,7 @@ static void apic_set_eoi(struct kvm_lapic *apic) if (vector == -1) return; - apic_clear_vector(vector, apic->regs + APIC_ISR); + apic_clear_isr(vector, apic); apic_update_ppr(apic); if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) && @@ -1081,6 +1126,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu) apic_set_reg(apic, APIC_TMR + 0x10 * i, 0); } apic->irr_pending = false; + apic->isr_count = 0; + apic->highest_isr_cache = -1; update_divide_count(apic); atomic_set(&apic->lapic_timer.pending, 0); if (kvm_vcpu_is_bsp(vcpu)) @@ -1248,7 +1295,7 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) if (vector == -1) return -1; - apic_set_vector(vector, apic->regs + APIC_ISR); + apic_set_isr(vector, apic); apic_update_ppr(apic); apic_clear_irr(vector, apic); return vector; @@ -1267,6 +1314,8 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu) update_divide_count(apic); start_apic_timer(apic); apic->irr_pending = true; + apic->isr_count = count_vectors(apic->regs + APIC_ISR); + apic->highest_isr_cache = -1; kvm_make_request(KVM_REQ_EVENT, vcpu); } diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index d29da25ea525..5ac9e5e2fedd 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -13,6 +13,10 @@ struct kvm_lapic { u32 divide_count; struct kvm_vcpu *vcpu; bool irr_pending; + /* Number of bits set in ISR. */ + s16 isr_count; + /* The highest vector set in ISR; if -1 - invalid, must scan ISR. */ + int highest_isr_cache; /** * APIC register page. The layout matches the register layout seen by * the guest 1:1, because it is accessed by the vmx microcode. From ab9cf4996bb989983e73da894b8dd0239aa2c3c2 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 24 Jun 2012 19:24:34 +0300 Subject: [PATCH 27/93] KVM guest: guest side for eoi avoidance The idea is simple: there's a bit, per APIC, in guest memory, that tells the guest that it does not need EOI. Guest tests it using a single est and clear operation - this is necessary so that host can detect interrupt nesting - and if set, it can skip the EOI MSR. 
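Stripped down to a sketch (the real hook is the kvm_guest_apic_eoi_write() added further down), the guest fast path is a single test-and-clear on the shared word:

	/* Sketch of the guest-side fast path; kvm_apic_eoi is the per-cpu word
	 * whose address the guest hands to the host through the new MSR. */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
		return;				/* host saw the bit: EOI elided */
	apic->write(APIC_EOI, APIC_EOI_ACK);	/* otherwise do the normal EOI */
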
I run a simple microbenchmark to show exit reduction (note: for testing, need to apply follow-up patch 'kvm: host side for eoi optimization' + a qemu patch I posted separately, on host): Before: Performance counter stats for 'sleep 1s': 47,357 kvm:kvm_entry [99.98%] 0 kvm:kvm_hypercall [99.98%] 0 kvm:kvm_hv_hypercall [99.98%] 5,001 kvm:kvm_pio [99.98%] 0 kvm:kvm_cpuid [99.98%] 22,124 kvm:kvm_apic [99.98%] 49,849 kvm:kvm_exit [99.98%] 21,115 kvm:kvm_inj_virq [99.98%] 0 kvm:kvm_inj_exception [99.98%] 0 kvm:kvm_page_fault [99.98%] 22,937 kvm:kvm_msr [99.98%] 0 kvm:kvm_cr [99.98%] 0 kvm:kvm_pic_set_irq [99.98%] 0 kvm:kvm_apic_ipi [99.98%] 22,207 kvm:kvm_apic_accept_irq [99.98%] 22,421 kvm:kvm_eoi [99.98%] 0 kvm:kvm_pv_eoi [99.99%] 0 kvm:kvm_nested_vmrun [99.99%] 0 kvm:kvm_nested_intercepts [99.99%] 0 kvm:kvm_nested_vmexit [99.99%] 0 kvm:kvm_nested_vmexit_inject [99.99%] 0 kvm:kvm_nested_intr_vmexit [99.99%] 0 kvm:kvm_invlpga [99.99%] 0 kvm:kvm_skinit [99.99%] 57 kvm:kvm_emulate_insn [99.99%] 0 kvm:vcpu_match_mmio [99.99%] 0 kvm:kvm_userspace_exit [99.99%] 2 kvm:kvm_set_irq [99.99%] 2 kvm:kvm_ioapic_set_irq [99.99%] 23,609 kvm:kvm_msi_set_irq [99.99%] 1 kvm:kvm_ack_irq [99.99%] 131 kvm:kvm_mmio [99.99%] 226 kvm:kvm_fpu [100.00%] 0 kvm:kvm_age_page [100.00%] 0 kvm:kvm_try_async_get_page [100.00%] 0 kvm:kvm_async_pf_doublefault [100.00%] 0 kvm:kvm_async_pf_not_present [100.00%] 0 kvm:kvm_async_pf_ready [100.00%] 0 kvm:kvm_async_pf_completed 1.002100578 seconds time elapsed After: Performance counter stats for 'sleep 1s': 28,354 kvm:kvm_entry [99.98%] 0 kvm:kvm_hypercall [99.98%] 0 kvm:kvm_hv_hypercall [99.98%] 1,347 kvm:kvm_pio [99.98%] 0 kvm:kvm_cpuid [99.98%] 1,931 kvm:kvm_apic [99.98%] 29,595 kvm:kvm_exit [99.98%] 24,884 kvm:kvm_inj_virq [99.98%] 0 kvm:kvm_inj_exception [99.98%] 0 kvm:kvm_page_fault [99.98%] 1,986 kvm:kvm_msr [99.98%] 0 kvm:kvm_cr [99.98%] 0 kvm:kvm_pic_set_irq [99.98%] 0 kvm:kvm_apic_ipi [99.99%] 25,953 kvm:kvm_apic_accept_irq [99.99%] 26,132 kvm:kvm_eoi [99.99%] 26,593 kvm:kvm_pv_eoi [99.99%] 0 kvm:kvm_nested_vmrun [99.99%] 0 kvm:kvm_nested_intercepts [99.99%] 0 kvm:kvm_nested_vmexit [99.99%] 0 kvm:kvm_nested_vmexit_inject [99.99%] 0 kvm:kvm_nested_intr_vmexit [99.99%] 0 kvm:kvm_invlpga [99.99%] 0 kvm:kvm_skinit [99.99%] 284 kvm:kvm_emulate_insn [99.99%] 68 kvm:vcpu_match_mmio [99.99%] 68 kvm:kvm_userspace_exit [99.99%] 2 kvm:kvm_set_irq [99.99%] 2 kvm:kvm_ioapic_set_irq [99.99%] 28,288 kvm:kvm_msi_set_irq [99.99%] 1 kvm:kvm_ack_irq [99.99%] 131 kvm:kvm_mmio [100.00%] 588 kvm:kvm_fpu [100.00%] 0 kvm:kvm_age_page [100.00%] 0 kvm:kvm_try_async_get_page [100.00%] 0 kvm:kvm_async_pf_doublefault [100.00%] 0 kvm:kvm_async_pf_not_present [100.00%] 0 kvm:kvm_async_pf_ready [100.00%] 0 kvm:kvm_async_pf_completed 1.002039622 seconds time elapsed We see that # of exits is almost halved. Signed-off-by: Michael S. Tsirkin Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_para.h | 7 ++++ arch/x86/kernel/kvm.c | 57 +++++++++++++++++++++++++++++++-- 2 files changed, 61 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index 63ab1661d00e..2f7712e08b1e 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h @@ -22,6 +22,7 @@ #define KVM_FEATURE_CLOCKSOURCE2 3 #define KVM_FEATURE_ASYNC_PF 4 #define KVM_FEATURE_STEAL_TIME 5 +#define KVM_FEATURE_PV_EOI 6 /* The last 8 bits are used to indicate how to interpret the flags field * in pvclock structure. If no bits are set, all flags are ignored. 
@@ -37,6 +38,7 @@ #define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01 #define MSR_KVM_ASYNC_PF_EN 0x4b564d02 #define MSR_KVM_STEAL_TIME 0x4b564d03 +#define MSR_KVM_PV_EOI_EN 0x4b564d04 struct kvm_steal_time { __u64 steal; @@ -89,6 +91,11 @@ struct kvm_vcpu_pv_apf_data { __u32 enabled; }; +#define KVM_PV_EOI_BIT 0 +#define KVM_PV_EOI_MASK (0x1 << KVM_PV_EOI_BIT) +#define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK +#define KVM_PV_EOI_DISABLED 0x0 + #ifdef __KERNEL__ #include diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index e554e5ad2fe8..75ab94c75c7a 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -39,6 +39,8 @@ #include #include #include +#include +#include static int kvmapf = 1; @@ -283,6 +285,22 @@ static void kvm_register_steal_time(void) cpu, __pa(st)); } +static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED; + +static void kvm_guest_apic_eoi_write(u32 reg, u32 val) +{ + /** + * This relies on __test_and_clear_bit to modify the memory + * in a way that is atomic with respect to the local CPU. + * The hypervisor only accesses this memory from the local CPU so + * there's no need for lock or memory barriers. + * An optimization barrier is implied in apic write. + */ + if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi))) + return; + apic->write(APIC_EOI, APIC_EOI_ACK); +} + void __cpuinit kvm_guest_cpu_init(void) { if (!kvm_para_available()) @@ -300,11 +318,20 @@ void __cpuinit kvm_guest_cpu_init(void) smp_processor_id()); } + if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) { + unsigned long pa; + /* Size alignment is implied but just to make it explicit. */ + BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4); + __get_cpu_var(kvm_apic_eoi) = 0; + pa = __pa(&__get_cpu_var(kvm_apic_eoi)) | KVM_MSR_ENABLED; + wrmsrl(MSR_KVM_PV_EOI_EN, pa); + } + if (has_steal_clock) kvm_register_steal_time(); } -static void kvm_pv_disable_apf(void *unused) +static void kvm_pv_disable_apf(void) { if (!__get_cpu_var(apf_reason).enabled) return; @@ -316,11 +343,23 @@ static void kvm_pv_disable_apf(void *unused) smp_processor_id()); } +static void kvm_pv_guest_cpu_reboot(void *unused) +{ + /* + * We disable PV EOI before we load a new kernel by kexec, + * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory. + * New kernel can re-enable when it boots. 
+ */ + if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) + wrmsrl(MSR_KVM_PV_EOI_EN, 0); + kvm_pv_disable_apf(); +} + static int kvm_pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused) { if (code == SYS_RESTART) - on_each_cpu(kvm_pv_disable_apf, NULL, 1); + on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1); return NOTIFY_DONE; } @@ -371,7 +410,9 @@ static void __cpuinit kvm_guest_cpu_online(void *dummy) static void kvm_guest_cpu_offline(void *dummy) { kvm_disable_steal_time(); - kvm_pv_disable_apf(NULL); + if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) + wrmsrl(MSR_KVM_PV_EOI_EN, 0); + kvm_pv_disable_apf(); apf_task_wake_all(); } @@ -424,6 +465,16 @@ void __init kvm_guest_init(void) pv_time_ops.steal_clock = kvm_steal_clock; } + if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) { + struct apic **drv; + + for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) { + /* Should happen once for each apic */ + WARN_ON((*drv)->eoi_write == kvm_guest_apic_eoi_write); + (*drv)->eoi_write = kvm_guest_apic_eoi_write; + } + } + #ifdef CONFIG_SMP smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu; register_cpu_notifier(&kvm_cpu_notifier); From d0a69d6321ca759bb8d47803d06ba8571ab42d07 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 24 Jun 2012 19:24:42 +0300 Subject: [PATCH 28/93] x86, bitops: note on __test_and_clear_bit atomicity __test_and_clear_bit is actually atomic with respect to the local CPU. Add a note saying that KVM on x86 relies on this behaviour so people don't accidentaly break it. Also warn not to rely on this in portable code. Signed-off-by: Michael S. Tsirkin Signed-off-by: Avi Kivity --- arch/x86/include/asm/bitops.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index a6983b277220..72f5009deb5a 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -264,6 +264,13 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) * This operation is non-atomic and can be reordered. * If two examples of this operation race, one can appear to succeed * but actually fail. You must protect multiple accesses with a lock. + * + * Note: the operation is performed atomically with respect to + * the local CPU, but not other CPUs. Portable code should not + * rely on this behaviour. + * KVM relies on this behaviour on x86 for modifying memory that is also + * accessed from a hypervisor on the same CPU if running in a VM: don't change + * this without also updating arch/x86/kernel/kvm.c */ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) { From c1af87dc96cd0f8f17694d0cd9be01b80b2c7a6a Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 24 Jun 2012 19:24:49 +0300 Subject: [PATCH 29/93] KVM: eoi msi documentation Document the new EOI MSR. Couldn't decide whether this change belongs conceptually on guest or host side, so a separate patch. Signed-off-by: Michael S. Tsirkin Signed-off-by: Avi Kivity --- Documentation/virtual/kvm/msr.txt | 33 +++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt index 96b41bd97523..730471048583 100644 --- a/Documentation/virtual/kvm/msr.txt +++ b/Documentation/virtual/kvm/msr.txt @@ -223,3 +223,36 @@ MSR_KVM_STEAL_TIME: 0x4b564d03 steal: the amount of time in which this vCPU did not run, in nanoseconds. Time during which the vcpu is idle, will not be reported as steal time. 
+ +MSR_KVM_EOI_EN: 0x4b564d04 + data: Bit 0 is 1 when PV end of interrupt is enabled on the vcpu; 0 + when disabled. Bit 1 is reserved and must be zero. When PV end of + interrupt is enabled (bit 0 set), bits 63-2 hold a 4-byte aligned + physical address of a 4 byte memory area which must be in guest RAM and + must be zeroed. + + The first, least significant bit of 4 byte memory location will be + written to by the hypervisor, typically at the time of interrupt + injection. Value of 1 means that guest can skip writing EOI to the apic + (using MSR or MMIO write); instead, it is sufficient to signal + EOI by clearing the bit in guest memory - this location will + later be polled by the hypervisor. + Value of 0 means that the EOI write is required. + + It is always safe for the guest to ignore the optimization and perform + the APIC EOI write anyway. + + Hypervisor is guaranteed to only modify this least + significant bit while in the current VCPU context, this means that + guest does not need to use either lock prefix or memory ordering + primitives to synchronise with the hypervisor. + + However, hypervisor can set and clear this memory bit at any time: + therefore to make sure hypervisor does not interrupt the + guest and clear the least significant bit in the memory area + in the window between guest testing it to detect + whether it can skip EOI apic write and between guest + clearing it to signal EOI to the hypervisor, + guest must both read the least significant bit in the memory area and + clear it using a single CPU instruction, such as test and clear, or + compare and exchange. From 5cfb1d5a65dd96d2d3a0751a1e4e81dc84c1f08f Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 24 Jun 2012 19:24:54 +0300 Subject: [PATCH 30/93] KVM: only sync when attention bits set Commit eb0dc6d0368072236dcd086d7fdc17fd3c4574d4 introduced apic attention bitmask but kvm still syncs lapic unconditionally. As that commit suggested and in anticipation of adding more attention bits, only sync lapic if(apic_attention). Signed-off-by: Michael S. Tsirkin Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6ed5983f78ff..c1f870690a64 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5388,7 +5388,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (unlikely(vcpu->arch.tsc_always_catchup)) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); - kvm_lapic_sync_from_vapic(vcpu); + if (vcpu->arch.apic_attention) + kvm_lapic_sync_from_vapic(vcpu); r = kvm_x86_ops->handle_exit(vcpu); out: From d905c0693514e6f713b207377b67c9972c5d7d49 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 24 Jun 2012 19:25:00 +0300 Subject: [PATCH 31/93] KVM: rearrange injection cancelling code Each time we need to cancel injection we invoke same code (cancel_injection callback). Move it towards the end of function using the familiar goto on error pattern. Will make it easier to do more cleanups for PV EOI. Signed-off-by: Michael S. 
Tsirkin Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c1f870690a64..7ea0f611bc89 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5296,8 +5296,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) r = kvm_mmu_reload(vcpu); if (unlikely(r)) { - kvm_x86_ops->cancel_injection(vcpu); - goto out; + goto cancel_injection; } preempt_disable(); @@ -5322,9 +5321,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) smp_wmb(); local_irq_enable(); preempt_enable(); - kvm_x86_ops->cancel_injection(vcpu); r = 1; - goto out; + goto cancel_injection; } srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); @@ -5392,6 +5390,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) kvm_lapic_sync_from_vapic(vcpu); r = kvm_x86_ops->handle_exit(vcpu); + return r; + +cancel_injection: + kvm_x86_ops->cancel_injection(vcpu); out: return r; } From ae7a2a3fb6f8b784c2752863f4f1f20c656f76fb Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 24 Jun 2012 19:25:07 +0300 Subject: [PATCH 32/93] KVM: host side for eoi optimization Implementation of PV EOI using shared memory. This reduces the number of exits an interrupt causes as much as by half. The idea is simple: there's a bit, per APIC, in guest memory, that tells the guest that it does not need EOI. We set it before injecting an interrupt and clear before injecting a nested one. Guest tests it using a test and clear operation - this is necessary so that host can detect interrupt nesting - and if set, it can skip the EOI MSR. There's a new MSR to set the address of said register in guest memory. Otherwise not much changed: - Guest EOI is not required - Register is tested & ISR is automatically cleared on exit For testing results see description of previous patch 'kvm_para: guest side for eoi avoidance'. Signed-off-by: Michael S. Tsirkin Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 12 +++ arch/x86/kvm/cpuid.c | 1 + arch/x86/kvm/lapic.c | 141 +++++++++++++++++++++++++++++++- arch/x86/kvm/lapic.h | 2 + arch/x86/kvm/trace.h | 34 ++++++++ arch/x86/kvm/x86.c | 7 ++ 6 files changed, 193 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index db7c1f2709a2..24b76474d9de 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -175,6 +175,13 @@ enum { /* apic attention bits */ #define KVM_APIC_CHECK_VAPIC 0 +/* + * The following bit is set with PV-EOI, unset on EOI. + * We detect PV-EOI changes by guest by comparing + * this bit with PV-EOI in guest memory. + * See the implementation in apic_update_pv_eoi. 
+ */ +#define KVM_APIC_PV_EOI_PENDING 1 /* * We don't want allocation failures within the mmu code, so we preallocate @@ -484,6 +491,11 @@ struct kvm_vcpu_arch { u64 length; u64 status; } osvw; + + struct { + u64 msr_val; + struct gfn_to_hva_cache data; + } pv_eoi; }; struct kvm_lpage_info { diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 7df1c6d839fb..61ccbdf3d0ac 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -409,6 +409,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, (1 << KVM_FEATURE_NOP_IO_DELAY) | (1 << KVM_FEATURE_CLOCKSOURCE2) | (1 << KVM_FEATURE_ASYNC_PF) | + (1 << KVM_FEATURE_PV_EOI) | (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT); if (sched_info_on()) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 805d887784f6..ce878788a39f 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -311,6 +311,54 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq) irq->level, irq->trig_mode); } +static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val) +{ + + return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val, + sizeof(val)); +} + +static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val) +{ + + return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val, + sizeof(*val)); +} + +static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; +} + +static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu) +{ + u8 val; + if (pv_eoi_get_user(vcpu, &val) < 0) + apic_debug("Can't read EOI MSR value: 0x%llx\n", + (unsigned long long)vcpi->arch.pv_eoi.msr_val); + return val & 0x1; +} + +static void pv_eoi_set_pending(struct kvm_vcpu *vcpu) +{ + if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) { + apic_debug("Can't set EOI MSR value: 0x%llx\n", + (unsigned long long)vcpi->arch.pv_eoi.msr_val); + return; + } + __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); +} + +static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu) +{ + if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) { + apic_debug("Can't clear EOI MSR value: 0x%llx\n", + (unsigned long long)vcpi->arch.pv_eoi.msr_val); + return; + } + __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); +} + static inline int apic_find_highest_isr(struct kvm_lapic *apic) { int result; @@ -527,15 +575,18 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio; } -static void apic_set_eoi(struct kvm_lapic *apic) +static int apic_set_eoi(struct kvm_lapic *apic) { int vector = apic_find_highest_isr(apic); + + trace_kvm_eoi(apic, vector); + /* * Not every write EOI will has corresponding ISR, * one example is when Kernel check timer on setup_IO_APIC */ if (vector == -1) - return; + return vector; apic_clear_isr(vector, apic); apic_update_ppr(apic); @@ -550,6 +601,7 @@ static void apic_set_eoi(struct kvm_lapic *apic) kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode); } kvm_make_request(KVM_REQ_EVENT, apic->vcpu); + return vector; } static void apic_send_ipi(struct kvm_lapic *apic) @@ -1132,6 +1184,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu) atomic_set(&apic->lapic_timer.pending, 0); if (kvm_vcpu_is_bsp(vcpu)) vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP; + vcpu->arch.pv_eoi.msr_val = 0; apic_update_ppr(apic); vcpu->arch.apic_arb_prio = 0; @@ -1332,11 +1385,51 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } +/* + * 
apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt + * + * Detect whether guest triggered PV EOI since the + * last entry. If yes, set EOI on guests's behalf. + * Clear PV EOI in guest memory in any case. + */ +static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu, + struct kvm_lapic *apic) +{ + bool pending; + int vector; + /* + * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host + * and KVM_PV_EOI_ENABLED in guest memory as follows: + * + * KVM_APIC_PV_EOI_PENDING is unset: + * -> host disabled PV EOI. + * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set: + * -> host enabled PV EOI, guest did not execute EOI yet. + * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset: + * -> host enabled PV EOI, guest executed EOI. + */ + BUG_ON(!pv_eoi_enabled(vcpu)); + pending = pv_eoi_get_pending(vcpu); + /* + * Clear pending bit in any case: it will be set again on vmentry. + * While this might not be ideal from performance point of view, + * this makes sure pv eoi is only enabled when we know it's safe. + */ + pv_eoi_clr_pending(vcpu); + if (pending) + return; + vector = apic_set_eoi(apic); + trace_kvm_pv_eoi(apic, vector); +} + void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) { u32 data; void *vapic; + if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) + apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); + if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) return; @@ -1347,17 +1440,44 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) apic_set_tpr(vcpu->arch.apic, data & 0xff); } +/* + * apic_sync_pv_eoi_to_guest - called before vmentry + * + * Detect whether it's safe to enable PV EOI and + * if yes do so. + */ +static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu, + struct kvm_lapic *apic) +{ + if (!pv_eoi_enabled(vcpu) || + /* IRR set or many bits in ISR: could be nested. */ + apic->irr_pending || + /* Cache not set: could be safe but we don't bother. */ + apic->highest_isr_cache == -1 || + /* Need EOI to update ioapic. */ + kvm_ioapic_handles_vector(vcpu->kvm, apic->highest_isr_cache)) { + /* + * PV EOI was disabled by apic_sync_pv_eoi_from_guest + * so we need not do anything here. 
+ */ + return; + } + + pv_eoi_set_pending(apic->vcpu); +} + void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) { u32 data, tpr; int max_irr, max_isr; - struct kvm_lapic *apic; + struct kvm_lapic *apic = vcpu->arch.apic; void *vapic; + apic_sync_pv_eoi_to_guest(vcpu, apic); + if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) return; - apic = vcpu->arch.apic; tpr = apic_get_reg(apic, APIC_TASKPRI) & 0xff; max_irr = apic_find_highest_irr(apic); if (max_irr < 0) @@ -1443,3 +1563,16 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data) return 0; } + +int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) +{ + u64 addr = data & ~KVM_MSR_ENABLED; + if (!IS_ALIGNED(addr, 4)) + return 1; + + vcpu->arch.pv_eoi.msr_val = data; + if (!pv_eoi_enabled(vcpu)) + return 0; + return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, + addr); +} diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 5ac9e5e2fedd..4af5405ae1e2 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -69,4 +69,6 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu) { return vcpu->arch.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE; } + +int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data); #endif diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 911d2641f14c..851914e207fc 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -517,6 +517,40 @@ TRACE_EVENT(kvm_apic_accept_irq, __entry->coalesced ? " (coalesced)" : "") ); +TRACE_EVENT(kvm_eoi, + TP_PROTO(struct kvm_lapic *apic, int vector), + TP_ARGS(apic, vector), + + TP_STRUCT__entry( + __field( __u32, apicid ) + __field( int, vector ) + ), + + TP_fast_assign( + __entry->apicid = apic->vcpu->vcpu_id; + __entry->vector = vector; + ), + + TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) +); + +TRACE_EVENT(kvm_pv_eoi, + TP_PROTO(struct kvm_lapic *apic, int vector), + TP_ARGS(apic, vector), + + TP_STRUCT__entry( + __field( __u32, apicid ) + __field( int, vector ) + ), + + TP_fast_assign( + __entry->apicid = apic->vcpu->vcpu_id; + __entry->vector = vector; + ), + + TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) +); + /* * Tracepoint for nested VMRUN */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7ea0f611bc89..8eacb2e64560 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -795,6 +795,7 @@ static u32 msrs_to_save[] = { MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, + MSR_KVM_PV_EOI_EN, MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, MSR_STAR, #ifdef CONFIG_X86_64 @@ -1653,6 +1654,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); break; + case MSR_KVM_PV_EOI_EN: + if (kvm_lapic_enable_pv_eoi(vcpu, data)) + return 1; + break; case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: @@ -5394,6 +5399,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) cancel_injection: kvm_x86_ops->cancel_injection(vcpu); + if (unlikely(vcpu->arch.apic_attention)) + kvm_lapic_sync_from_vapic(vcpu); out: return r; } From f9808b7fd422b965cea52e05ba470e0a473c53d3 Mon Sep 17 00:00:00 2001 From: "Michael S. 
Tsirkin" Date: Sun, 1 Jul 2012 18:05:06 +0300 Subject: [PATCH 33/93] apic: fix kvm build on UP without IOAPIC On UP i386, when APIC is disabled # CONFIG_X86_UP_APIC is not set # CONFIG_PCI_IOAPIC is not set code looking at apicdrivers never has any effect but it still gets compiled in. In particular, this causes build failures with kvm, but it generally bloats the kernel unnecessarily. Fix by defining both __apicdrivers and __apicdrivers_end to be NULL when CONFIG_X86_LOCAL_APIC is unset: I verified that as the result any loop scanning __apicdrivers gets optimized out by the compiler. Warning: a .config with apic disabled doesn't seem to boot for me (even without this patch). Still verifying why, meanwhile this patch is compile-tested only. Signed-off-by: Michael S. Tsirkin Reported-by: Randy Dunlap Acked-by: Randy Dunlap Acked-by: H. Peter Anvin Signed-off-by: Marcelo Tosatti --- arch/x86/include/asm/apic.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index eaff4790ed96..aa5b2eec3602 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -417,7 +417,12 @@ extern struct apic *apic; __aligned(sizeof(struct apic *)) \ __section(.apicdrivers) = { &sym1, &sym2 } +#ifdef CONFIG_X86_LOCAL_APIC extern struct apic *__apicdrivers[], *__apicdrivers_end[]; +#else +#define __apicdrivers ((struct apic **)NULL) +#define __apicdrivers_end ((struct apic **)NULL) +#endif /* * APIC functionality to boot other CPUs - only used on SMP: From 2106a548122ef0557dc51eae4f3f1a538cebfa79 Mon Sep 17 00:00:00 2001 From: Guo Chao Date: Fri, 15 Jun 2012 11:31:56 +0800 Subject: [PATCH 34/93] KVM: VMX: code clean for vmx_init() Signed-off-by: Guo Chao Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/vmx.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index eeeb4a25aed6..e10ec0e4d1c6 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -7290,23 +7290,21 @@ static int __init vmx_init(void) if (!vmx_io_bitmap_a) return -ENOMEM; + r = -ENOMEM; + vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL); - if (!vmx_io_bitmap_b) { - r = -ENOMEM; + if (!vmx_io_bitmap_b) goto out; - } vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL); - if (!vmx_msr_bitmap_legacy) { - r = -ENOMEM; + if (!vmx_msr_bitmap_legacy) goto out1; - } + vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL); - if (!vmx_msr_bitmap_longmode) { - r = -ENOMEM; + if (!vmx_msr_bitmap_longmode) goto out2; - } + /* * Allow direct access to the PC debug port (it is often used for I/O From 36c1ed821bd11fb9a3f99a060b1553c114dc2d07 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 15 Jun 2012 15:07:24 -0400 Subject: [PATCH 35/93] KVM: Guard mmu_notifier specific code with CONFIG_MMU_NOTIFIER In order to avoid compilation failure when KVM is not compiled in, guard the mmu_notifier specific sections with both CONFIG_MMU_NOTIFIER and KVM_ARCH_WANT_MMU_NOTIFIER, like it is being done in the rest of the KVM code. 
Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall Signed-off-by: Marcelo Tosatti --- include/linux/kvm_host.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c7f77876c9b3..e3c86f8c86c9 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -306,7 +306,7 @@ struct kvm { struct hlist_head irq_ack_notifier_list; #endif -#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) struct mmu_notifier mmu_notifier; unsigned long mmu_notifier_seq; long mmu_notifier_count; @@ -780,7 +780,7 @@ struct kvm_stats_debugfs_item { extern struct kvm_stats_debugfs_item debugfs_entries[]; extern struct dentry *kvm_debugfs_dir; -#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq) { if (unlikely(vcpu->kvm->mmu_notifier_count)) From 9d04edd23e6ab368d25ca0adde6f1676185d49f8 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 26 Jun 2012 16:06:36 +0200 Subject: [PATCH 36/93] s390/smp: remove redundant check condition code "status stored" for sigp sense running always implies that only the "not running" status bit is set. Therefore no need to check if it is set. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti --- arch/s390/kernel/smp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 15cca26ccb6c..c78074c6cc1d 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -158,8 +158,8 @@ static inline int pcpu_running(struct pcpu *pcpu) if (__pcpu_sigp(pcpu->address, sigp_sense_running, 0, &pcpu->status) != sigp_status_stored) return 1; - /* Check for running status */ - return !(pcpu->status & 0x400); + /* Status stored condition code is equivalent to cpu not running. */ + return 0; } /* From 9b747530d928800c9eb7f0c131dffd2cc868c475 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 26 Jun 2012 16:06:37 +0200 Subject: [PATCH 37/93] s390/smp/kvm: unifiy sigp definitions The smp and the kvm code have different defines for the sigp order codes. Let's just have a single place where these are defined. Also move the sigp condition code and sigp cpu status bits to the new sigp.h header file. 
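As a small usage sketch with the shared names (not part of the patch), a sense request and its status word now read the same on both sides; the 0x50 magic in pcpu_stopped(), for instance, is just the stopped and check-stop status bits:

	/* Sketch only: issue SIGP SENSE and decode the result with the new names. */
	cc = __pcpu_sigp(pcpu->address, SIGP_SENSE, 0, &pcpu->status);
	if (cc == SIGP_CC_STATUS_STORED &&
	    (pcpu->status & (SIGP_STATUS_STOPPED | SIGP_STATUS_CHECK_STOP)))
		/* target cpu is stopped (or check-stopped) */;
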
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti --- arch/s390/include/asm/sigp.h | 31 ++++++++++++++++ arch/s390/kernel/smp.c | 72 ++++++++++++------------------------ arch/s390/kvm/sigp.c | 46 +++++------------------ 3 files changed, 64 insertions(+), 85 deletions(-) create mode 100644 arch/s390/include/asm/sigp.h diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h new file mode 100644 index 000000000000..7306270b5b84 --- /dev/null +++ b/arch/s390/include/asm/sigp.h @@ -0,0 +1,31 @@ +#ifndef __S390_ASM_SIGP_H +#define __S390_ASM_SIGP_H + +/* SIGP order codes */ +#define SIGP_SENSE 1 +#define SIGP_EXTERNAL_CALL 2 +#define SIGP_EMERGENCY_SIGNAL 3 +#define SIGP_STOP 5 +#define SIGP_RESTART 6 +#define SIGP_STOP_AND_STORE_STATUS 9 +#define SIGP_INITIAL_CPU_RESET 11 +#define SIGP_SET_PREFIX 13 +#define SIGP_STORE_STATUS_AT_ADDRESS 14 +#define SIGP_SET_ARCHITECTURE 18 +#define SIGP_SENSE_RUNNING 21 + +/* SIGP condition codes */ +#define SIGP_CC_ORDER_CODE_ACCEPTED 0 +#define SIGP_CC_STATUS_STORED 1 +#define SIGP_CC_BUSY 2 +#define SIGP_CC_NOT_OPERATIONAL 3 + +/* SIGP cpu status bits */ + +#define SIGP_STATUS_CHECK_STOP 0x00000010UL +#define SIGP_STATUS_STOPPED 0x00000040UL +#define SIGP_STATUS_INVALID_PARAMETER 0x00000100UL +#define SIGP_STATUS_INCORRECT_STATE 0x00000200UL +#define SIGP_STATUS_NOT_RUNNING 0x00000400UL + +#endif /* __S390_ASM_SIGP_H */ diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index c78074c6cc1d..6e4047e4b498 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -44,33 +44,9 @@ #include #include #include +#include #include "entry.h" -enum { - sigp_sense = 1, - sigp_external_call = 2, - sigp_emergency_signal = 3, - sigp_start = 4, - sigp_stop = 5, - sigp_restart = 6, - sigp_stop_and_store_status = 9, - sigp_initial_cpu_reset = 11, - sigp_cpu_reset = 12, - sigp_set_prefix = 13, - sigp_store_status_at_address = 14, - sigp_store_extended_status_at_address = 15, - sigp_set_architecture = 18, - sigp_conditional_emergency_signal = 19, - sigp_sense_running = 21, -}; - -enum { - sigp_order_code_accepted = 0, - sigp_status_stored = 1, - sigp_busy = 2, - sigp_not_operational = 3, -}; - enum { ec_schedule = 0, ec_call_function, @@ -124,7 +100,7 @@ static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status) while (1) { cc = __pcpu_sigp(addr, order, parm, status); - if (cc != sigp_busy) + if (cc != SIGP_CC_BUSY) return cc; cpu_relax(); } @@ -136,7 +112,7 @@ static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm) for (retry = 0; ; retry++) { cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status); - if (cc != sigp_busy) + if (cc != SIGP_CC_BUSY) break; if (retry >= 3) udelay(10); @@ -146,8 +122,8 @@ static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm) static inline int pcpu_stopped(struct pcpu *pcpu) { - if (__pcpu_sigp(pcpu->address, sigp_sense, - 0, &pcpu->status) != sigp_status_stored) + if (__pcpu_sigp(pcpu->address, SIGP_SENSE, + 0, &pcpu->status) != SIGP_CC_STATUS_STORED) return 0; /* Check for stopped and check stop state */ return !!(pcpu->status & 0x50); @@ -155,8 +131,8 @@ static inline int pcpu_stopped(struct pcpu *pcpu) static inline int pcpu_running(struct pcpu *pcpu) { - if (__pcpu_sigp(pcpu->address, sigp_sense_running, - 0, &pcpu->status) != sigp_status_stored) + if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING, + 0, &pcpu->status) != SIGP_CC_STATUS_STORED) return 1; /* Status stored condition code is 
equivalent to cpu not running. */ return 0; @@ -181,7 +157,7 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit) set_bit(ec_bit, &pcpu->ec_mask); order = pcpu_running(pcpu) ? - sigp_external_call : sigp_emergency_signal; + SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL; pcpu_sigp_retry(pcpu, order, 0); } @@ -214,7 +190,7 @@ static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) goto out; #endif lowcore_ptr[cpu] = lc; - pcpu_sigp_retry(pcpu, sigp_set_prefix, (u32)(unsigned long) lc); + pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc); return 0; out: if (pcpu != &pcpu_devices[0]) { @@ -229,7 +205,7 @@ out: static void pcpu_free_lowcore(struct pcpu *pcpu) { - pcpu_sigp_retry(pcpu, sigp_set_prefix, 0); + pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0); lowcore_ptr[pcpu - pcpu_devices] = NULL; #ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) { @@ -288,7 +264,7 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data) lc->restart_fn = (unsigned long) func; lc->restart_data = (unsigned long) data; lc->restart_source = -1UL; - pcpu_sigp_retry(pcpu, sigp_restart, 0); + pcpu_sigp_retry(pcpu, SIGP_RESTART, 0); } /* @@ -309,7 +285,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *), if (pcpu->address == restart.source) func(data); /* should not return */ /* Stop target cpu (if func returns this stops the current cpu). */ - pcpu_sigp_retry(pcpu, sigp_stop, 0); + pcpu_sigp_retry(pcpu, SIGP_STOP, 0); /* Restart func on the target cpu and stop the current cpu. */ memcpy_absolute(&lc->restart_stack, &restart, sizeof(restart)); asm volatile( @@ -388,8 +364,8 @@ void smp_emergency_stop(cpumask_t *cpumask) for_each_cpu(cpu, cpumask) { struct pcpu *pcpu = pcpu_devices + cpu; set_bit(ec_stop_cpu, &pcpu->ec_mask); - while (__pcpu_sigp(pcpu->address, sigp_emergency_signal, - 0, NULL) == sigp_busy && + while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL, + 0, NULL) == SIGP_CC_BUSY && get_clock() < end) cpu_relax(); } @@ -425,7 +401,7 @@ void smp_send_stop(void) /* stop all processors */ for_each_cpu(cpu, &cpumask) { struct pcpu *pcpu = pcpu_devices + cpu; - pcpu_sigp_retry(pcpu, sigp_stop, 0); + pcpu_sigp_retry(pcpu, SIGP_STOP, 0); while (!pcpu_stopped(pcpu)) cpu_relax(); } @@ -436,7 +412,7 @@ void smp_send_stop(void) */ void smp_stop_cpu(void) { - pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0); + pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0); for (;;) ; } @@ -590,7 +566,7 @@ static void __init smp_get_save_area(int cpu, u16 address) } #endif /* Get the registers of a non-boot cpu. 
*/ - __pcpu_sigp_relax(address, sigp_stop_and_store_status, 0, NULL); + __pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL); memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area)); } @@ -599,8 +575,8 @@ int smp_store_status(int cpu) struct pcpu *pcpu; pcpu = pcpu_devices + cpu; - if (__pcpu_sigp_relax(pcpu->address, sigp_stop_and_store_status, - 0, NULL) != sigp_order_code_accepted) + if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS, + 0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED) return -EIO; return 0; } @@ -621,8 +597,8 @@ static struct sclp_cpu_info *smp_get_cpu_info(void) if (info && (use_sigp_detection || sclp_get_cpu_info(info))) { use_sigp_detection = 1; for (address = 0; address <= MAX_CPU_ADDRESS; address++) { - if (__pcpu_sigp_relax(address, sigp_sense, 0, NULL) == - sigp_not_operational) + if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) == + SIGP_CC_NOT_OPERATIONAL) continue; info->cpu[info->configured].address = address; info->configured++; @@ -734,8 +710,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) pcpu = pcpu_devices + cpu; if (pcpu->state != CPU_STATE_CONFIGURED) return -EIO; - if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) != - sigp_order_code_accepted) + if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) != + SIGP_CC_ORDER_CODE_ACCEPTED) return -EIO; rc = pcpu_alloc_lowcore(pcpu, cpu); @@ -795,7 +771,7 @@ void __cpu_die(unsigned int cpu) void __noreturn cpu_die(void) { idle_task_exit(); - pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0); + pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0); for (;;) ; } diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 0ad4cf238391..fda1d64f15ee 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -15,38 +15,10 @@ #include #include #include +#include #include "gaccess.h" #include "kvm-s390.h" -/* sigp order codes */ -#define SIGP_SENSE 0x01 -#define SIGP_EXTERNAL_CALL 0x02 -#define SIGP_EMERGENCY 0x03 -#define SIGP_START 0x04 -#define SIGP_STOP 0x05 -#define SIGP_RESTART 0x06 -#define SIGP_STOP_STORE_STATUS 0x09 -#define SIGP_INITIAL_CPU_RESET 0x0b -#define SIGP_CPU_RESET 0x0c -#define SIGP_SET_PREFIX 0x0d -#define SIGP_STORE_STATUS_ADDR 0x0e -#define SIGP_SET_ARCH 0x12 -#define SIGP_SENSE_RUNNING 0x15 - -/* cpu status bits */ -#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL -#define SIGP_STAT_NOT_RUNNING 0x00000400UL -#define SIGP_STAT_INCORRECT_STATE 0x00000200UL -#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL -#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL -#define SIGP_STAT_STOPPED 0x00000040UL -#define SIGP_STAT_OPERATOR_INTERV 0x00000020UL -#define SIGP_STAT_CHECK_STOP 0x00000010UL -#define SIGP_STAT_INOPERATIVE 0x00000004UL -#define SIGP_STAT_INVALID_ORDER 0x00000002UL -#define SIGP_STAT_RECEIVER_CHECK 0x00000001UL - - static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg) { @@ -65,7 +37,7 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, rc = 1; /* status stored */ } else { *reg &= 0xffffffff00000000UL; - *reg |= SIGP_STAT_STOPPED; + *reg |= SIGP_STATUS_STOPPED; rc = 1; /* status stored */ } spin_unlock(&fi->lock); @@ -235,7 +207,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, address = address & 0x7fffe000u; if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) { - *reg |= SIGP_STAT_INVALID_PARAMETER; + *reg |= SIGP_STATUS_INVALID_PARAMETER; return 1; /* invalid parameter */ } 
@@ -249,7 +221,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, if (li == NULL) { rc = 1; /* incorrect state */ - *reg &= SIGP_STAT_INCORRECT_STATE; + *reg &= SIGP_STATUS_INCORRECT_STATE; kfree(inti); goto out_fi; } @@ -258,7 +230,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, /* cpu must be in stopped state */ if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { rc = 1; /* incorrect state */ - *reg &= SIGP_STAT_INCORRECT_STATE; + *reg &= SIGP_STATUS_INCORRECT_STATE; kfree(inti); goto out_li; } @@ -300,7 +272,7 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr, } else { /* not running */ *reg &= 0xffffffff00000000UL; - *reg |= SIGP_STAT_NOT_RUNNING; + *reg |= SIGP_STATUS_NOT_RUNNING; rc = 0; } } @@ -375,7 +347,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) vcpu->stat.instruction_sigp_external_call++; rc = __sigp_external_call(vcpu, cpu_addr); break; - case SIGP_EMERGENCY: + case SIGP_EMERGENCY_SIGNAL: vcpu->stat.instruction_sigp_emergency++; rc = __sigp_emergency(vcpu, cpu_addr); break; @@ -383,12 +355,12 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) vcpu->stat.instruction_sigp_stop++; rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP); break; - case SIGP_STOP_STORE_STATUS: + case SIGP_STOP_AND_STORE_STATUS: vcpu->stat.instruction_sigp_stop++; rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP | ACTION_STOP_ON_STOP); break; - case SIGP_SET_ARCH: + case SIGP_SET_ARCHITECTURE: vcpu->stat.instruction_sigp_arch++; rc = __sigp_set_arch(vcpu, parameter); break; From 7ba26c482fcb42b01785ee1f39871fcc636ab3fe Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 26 Jun 2012 16:06:38 +0200 Subject: [PATCH 38/93] KVM: s390: fix sigp sense running condition code handling Only if the sensed cpu is not running a status is stored, which is reflected by condition code 1. If the cpu is running, condition code 0 should be returned. Just the opposite of what the code is doing. Acked-by: Cornelia Huck Signed-off-by: Heiko Carstens Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti --- arch/s390/kvm/sigp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index fda1d64f15ee..caccc0ee9cee 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -268,12 +268,12 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr, if (atomic_read(fi->local_int[cpu_addr]->cpuflags) & CPUSTAT_RUNNING) { /* running */ - rc = 1; + rc = 0; } else { /* not running */ *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_NOT_RUNNING; - rc = 0; + rc = 1; } } spin_unlock(&fi->lock); From 0744426e28490357855aafd2ca76c819231851c5 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 26 Jun 2012 16:06:39 +0200 Subject: [PATCH 39/93] KVM: s390: fix sigp set prefix status stored cases If an invalid parameter is passed or the addressed cpu is in an incorrect state sigp set prefix will store a status. This status must only have bits set as defined by the architecture. The current kvm implementation missed to clear bits and also did not set the intended status bit ("and" instead of "or" operation). 
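The intended store pattern is to clear the low word of the status register first and then or in the architected bit, so that only defined SIGP status bits remain visible to the guest. As a minimal standalone sketch of that pattern (store_sigp_status() is a made-up helper for illustration only, reusing the SIGP_STATUS_INVALID_PARAMETER value from the new asm/sigp.h header):

#include <stdint.h>
#include <stdio.h>

#define SIGP_STATUS_INVALID_PARAMETER 0x00000100UL /* as defined in asm/sigp.h */

/* Store a SIGP status into the low 32 bits of a 64-bit register image:
 * preserve the high half, wipe the low half, then set only the
 * architected status bit ("or", not "and"). */
static void store_sigp_status(uint64_t *reg, uint32_t status)
{
    *reg &= 0xffffffff00000000ULL;  /* clear stale low word */
    *reg |= status;                 /* set the defined bit */
}

int main(void)
{
    uint64_t reg = 0x1122334455667788ULL;

    store_sigp_status(&reg, SIGP_STATUS_INVALID_PARAMETER);
    printf("%#018llx\n", (unsigned long long)reg); /* 0x1122334400000100 */
    return 0;
}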
Signed-off-by: Heiko Carstens Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti --- arch/s390/kvm/sigp.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index caccc0ee9cee..ca544d53557e 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -207,6 +207,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, address = address & 0x7fffe000u; if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) { + *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_INVALID_PARAMETER; return 1; /* invalid parameter */ } @@ -220,8 +221,9 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, li = fi->local_int[cpu_addr]; if (li == NULL) { + *reg &= 0xffffffff00000000UL; + *reg |= SIGP_STATUS_INCORRECT_STATE; rc = 1; /* incorrect state */ - *reg &= SIGP_STATUS_INCORRECT_STATE; kfree(inti); goto out_fi; } @@ -229,8 +231,9 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, spin_lock_bh(&li->lock); /* cpu must be in stopped state */ if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { + *reg &= 0xffffffff00000000UL; + *reg |= SIGP_STATUS_INCORRECT_STATE; rc = 1; /* incorrect state */ - *reg &= SIGP_STATUS_INCORRECT_STATE; kfree(inti); goto out_li; } From ea1918dd3d1a8fcb7ce26816fdf31a50f7d04689 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 26 Jun 2012 16:06:40 +0200 Subject: [PATCH 40/93] KVM: s390: use sigp condition code defines Just use the defines instead of using plain numbers and adding a comment behind each line. Signed-off-by: Heiko Carstens Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti --- arch/s390/kvm/sigp.c | 58 ++++++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index ca544d53557e..97c9f36a4533 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -26,19 +26,19 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, int rc; if (cpu_addr >= KVM_MAX_VCPUS) - return 3; /* not operational */ + return SIGP_CC_NOT_OPERATIONAL; spin_lock(&fi->lock); if (fi->local_int[cpu_addr] == NULL) - rc = 3; /* not operational */ + rc = SIGP_CC_NOT_OPERATIONAL; else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags) & CPUSTAT_STOPPED)) { *reg &= 0xffffffff00000000UL; - rc = 1; /* status stored */ + rc = SIGP_CC_STATUS_STORED; } else { *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_STOPPED; - rc = 1; /* status stored */ + rc = SIGP_CC_STATUS_STORED; } spin_unlock(&fi->lock); @@ -54,7 +54,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) int rc; if (cpu_addr >= KVM_MAX_VCPUS) - return 3; /* not operational */ + return SIGP_CC_NOT_OPERATIONAL; inti = kzalloc(sizeof(*inti), GFP_KERNEL); if (!inti) @@ -66,7 +66,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) spin_lock(&fi->lock); li = fi->local_int[cpu_addr]; if (li == NULL) { - rc = 3; /* not operational */ + rc = SIGP_CC_NOT_OPERATIONAL; kfree(inti); goto unlock; } @@ -77,7 +77,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) if (waitqueue_active(&li->wq)) wake_up_interruptible(&li->wq); spin_unlock_bh(&li->lock); - rc = 0; /* order accepted */ + rc = SIGP_CC_ORDER_CODE_ACCEPTED; VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); unlock: spin_unlock(&fi->lock); @@ -92,7 +92,7 @@ static int __sigp_external_call(struct 
kvm_vcpu *vcpu, u16 cpu_addr) int rc; if (cpu_addr >= KVM_MAX_VCPUS) - return 3; /* not operational */ + return SIGP_CC_NOT_OPERATIONAL; inti = kzalloc(sizeof(*inti), GFP_KERNEL); if (!inti) @@ -104,7 +104,7 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) spin_lock(&fi->lock); li = fi->local_int[cpu_addr]; if (li == NULL) { - rc = 3; /* not operational */ + rc = SIGP_CC_NOT_OPERATIONAL; kfree(inti); goto unlock; } @@ -115,7 +115,7 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) if (waitqueue_active(&li->wq)) wake_up_interruptible(&li->wq); spin_unlock_bh(&li->lock); - rc = 0; /* order accepted */ + rc = SIGP_CC_ORDER_CODE_ACCEPTED; VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr); unlock: spin_unlock(&fi->lock); @@ -143,7 +143,7 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) out: spin_unlock_bh(&li->lock); - return 0; /* order accepted */ + return SIGP_CC_ORDER_CODE_ACCEPTED; } static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) @@ -153,12 +153,12 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) int rc; if (cpu_addr >= KVM_MAX_VCPUS) - return 3; /* not operational */ + return SIGP_CC_NOT_OPERATIONAL; spin_lock(&fi->lock); li = fi->local_int[cpu_addr]; if (li == NULL) { - rc = 3; /* not operational */ + rc = SIGP_CC_NOT_OPERATIONAL; goto unlock; } @@ -182,11 +182,11 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter) switch (parameter & 0xff) { case 0: - rc = 3; /* not operational */ + rc = SIGP_CC_NOT_OPERATIONAL; break; case 1: case 2: - rc = 0; /* order accepted */ + rc = SIGP_CC_ORDER_CODE_ACCEPTED; break; default: rc = -EOPNOTSUPP; @@ -209,12 +209,12 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) { *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_INVALID_PARAMETER; - return 1; /* invalid parameter */ + return SIGP_CC_STATUS_STORED; } inti = kzalloc(sizeof(*inti), GFP_KERNEL); if (!inti) - return 2; /* busy */ + return SIGP_CC_BUSY; spin_lock(&fi->lock); if (cpu_addr < KVM_MAX_VCPUS) @@ -223,7 +223,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, if (li == NULL) { *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_INCORRECT_STATE; - rc = 1; /* incorrect state */ + rc = SIGP_CC_STATUS_STORED; kfree(inti); goto out_fi; } @@ -233,7 +233,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_INCORRECT_STATE; - rc = 1; /* incorrect state */ + rc = SIGP_CC_STATUS_STORED; kfree(inti); goto out_li; } @@ -245,7 +245,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, atomic_set(&li->active, 1); if (waitqueue_active(&li->wq)) wake_up_interruptible(&li->wq); - rc = 0; /* order accepted */ + rc = SIGP_CC_ORDER_CODE_ACCEPTED; VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); out_li: @@ -262,21 +262,21 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr, struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; if (cpu_addr >= KVM_MAX_VCPUS) - return 3; /* not operational */ + return SIGP_CC_NOT_OPERATIONAL; spin_lock(&fi->lock); if (fi->local_int[cpu_addr] == NULL) - rc = 3; /* not operational */ + rc = SIGP_CC_NOT_OPERATIONAL; else { if (atomic_read(fi->local_int[cpu_addr]->cpuflags) & CPUSTAT_RUNNING) { 
/* running */ - rc = 0; + rc = SIGP_CC_ORDER_CODE_ACCEPTED; } else { /* not running */ *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_NOT_RUNNING; - rc = 1; + rc = SIGP_CC_STATUS_STORED; } } spin_unlock(&fi->lock); @@ -289,23 +289,23 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr, static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr) { - int rc = 0; struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; struct kvm_s390_local_interrupt *li; + int rc = SIGP_CC_ORDER_CODE_ACCEPTED; if (cpu_addr >= KVM_MAX_VCPUS) - return 3; /* not operational */ + return SIGP_CC_NOT_OPERATIONAL; spin_lock(&fi->lock); li = fi->local_int[cpu_addr]; if (li == NULL) { - rc = 3; /* not operational */ + rc = SIGP_CC_NOT_OPERATIONAL; goto out; } spin_lock_bh(&li->lock); if (li->action_bits & ACTION_STOP_ON_STOP) - rc = 2; /* busy */ + rc = SIGP_CC_BUSY; else VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace", cpu_addr); @@ -380,7 +380,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) case SIGP_RESTART: vcpu->stat.instruction_sigp_restart++; rc = __sigp_restart(vcpu, cpu_addr); - if (rc == 2) /* busy */ + if (rc == SIGP_CC_BUSY) break; /* user space must know about restart */ default: From 21b26c08535c992802402c7ba2d789ca9e1a5707 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Tue, 26 Jun 2012 16:06:41 +0200 Subject: [PATCH 41/93] KVM: s390: Fix sigp sense handling. If sigp sense doesn't have any status bits to report, it should set cc 0 and leave the register as-is. Since we know about the external call pending bit, we should report it if it is set as well. Acked-by: Heiko Carstens Reviewed-by: Christian Borntraeger Signed-off-by: Cornelia Huck Signed-off-by: Marcelo Tosatti --- arch/s390/include/asm/sigp.h | 1 + arch/s390/kvm/sigp.c | 14 +++++++++----- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h index 7306270b5b84..5a87d16d3e7c 100644 --- a/arch/s390/include/asm/sigp.h +++ b/arch/s390/include/asm/sigp.h @@ -24,6 +24,7 @@ #define SIGP_STATUS_CHECK_STOP 0x00000010UL #define SIGP_STATUS_STOPPED 0x00000040UL +#define SIGP_STATUS_EXT_CALL_PENDING 0x00000080UL #define SIGP_STATUS_INVALID_PARAMETER 0x00000100UL #define SIGP_STATUS_INCORRECT_STATE 0x00000200UL #define SIGP_STATUS_NOT_RUNNING 0x00000400UL diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 97c9f36a4533..6ed8175ca7e7 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -32,12 +32,16 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, if (fi->local_int[cpu_addr] == NULL) rc = SIGP_CC_NOT_OPERATIONAL; else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags) - & CPUSTAT_STOPPED)) { + & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED))) + rc = SIGP_CC_ORDER_CODE_ACCEPTED; + else { *reg &= 0xffffffff00000000UL; - rc = SIGP_CC_STATUS_STORED; - } else { - *reg &= 0xffffffff00000000UL; - *reg |= SIGP_STATUS_STOPPED; + if (atomic_read(fi->local_int[cpu_addr]->cpuflags) + & CPUSTAT_ECALL_PEND) + *reg |= SIGP_STATUS_EXT_CALL_PENDING; + if (atomic_read(fi->local_int[cpu_addr]->cpuflags) + & CPUSTAT_STOPPED) + *reg |= SIGP_STATUS_STOPPED; rc = SIGP_CC_STATUS_STORED; } spin_unlock(&fi->lock); From 5cfc2aabcb282f4554e7086c9893b386ad6ba9d4 Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Tue, 19 Jun 2012 16:51:04 -0400 Subject: [PATCH 42/93] KVM: handle last_boosted_vcpu = 0 case If last_boosted_vcpu == 0, then we fall through all test cases and may end up with all VCPUs pouncing on vcpu 0. 
With a large enough guest, this can result in enormous runqueue lock contention, which can prevent vcpu0 from running, leading to a livelock. Changing < to <= makes sure we properly handle that case. Signed-off-by: Rik van Riel Signed-off-by: Marcelo Tosatti --- virt/kvm/kvm_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 636bd08bb399..b3ce91c623e2 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1598,7 +1598,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me) */ for (pass = 0; pass < 2 && !yielded; pass++) { kvm_for_each_vcpu(i, vcpu, kvm) { - if (!pass && i < last_boosted_vcpu) { + if (!pass && i <= last_boosted_vcpu) { i = last_boosted_vcpu; continue; } else if (pass && i > last_boosted_vcpu) From e676505ac96813e8b93170b1f5e5ffe0cf6a2348 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 8 Jul 2012 17:16:30 +0300 Subject: [PATCH 43/93] KVM: MMU: Force cr3 reload with two dimensional paging on mov cr3 emulation Currently the MMU's ->new_cr3() callback does nothing when guest paging is disabled or when two-dimensional paging (e.g. EPT on Intel) is active. This means that an emulated write to cr3 can be lost; kvm_set_cr3() will write vcpu->arch.cr3, but the GUEST_CR3 field in the VMCS will retain its old value and this is what the guest sees. This bug did not have any effect until now because: - with unrestricted guest, or with svm, we never emulate a mov cr3 instruction - without unrestricted guest, and with paging enabled, we also never emulate a mov cr3 instruction - without unrestricted guest, but with paging disabled, the guest's cr3 is ignored until the guest enables paging; at this point the value from arch.cr3 is loaded correctly by the mov cr0 instruction which turns on paging However, the patchset that enables big real mode causes us to emulate mov cr3 instructions in protected mode sometimes (when guest state is not virtualizable by vmx); this mov cr3 is effectively ignored and will crash the guest. The fix is to make nonpaging_new_cr3() call mmu_free_roots() to force a cr3 reload. This is awkward because now all the new_cr3 callbacks do the same thing, and because mmu_free_roots() is somewhat of an overkill; but fixing that is more complicated and will be done after this minimal fix. Observed in the Windows XP 32-bit installer while bringing up secondary vcpus. Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 3b53d9e08bfc..569cd66ba24c 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -188,6 +188,7 @@ static u64 __read_mostly shadow_dirty_mask; static u64 __read_mostly shadow_mmio_mask; static void mmu_spte_set(u64 *sptep, u64 spte); +static void mmu_free_roots(struct kvm_vcpu *vcpu); void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask) { @@ -2401,6 +2402,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) { + mmu_free_roots(vcpu); } static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, From d881e6f6cffe3993245963143cab2528f918e071 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 6 Jun 2012 18:36:48 +0300 Subject: [PATCH 44/93] KVM: VMX: Return correct CPL during transition to protected mode In protected mode, the CPL is defined as the lower two bits of CS, as set by the last far jump.
But during the transition to protected mode, there is no last far jump, so we need to return zero (the inherited real mode CPL). Fix by reading CPL from the cache during the transition. This isn't 100% correct since we don't set the CPL cache on a far jump, but since protected mode transition will always jump to a segment with RPL=0, it will always work. Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index e10ec0e4d1c6..486db2f9561b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3175,11 +3175,22 @@ static int __vmx_get_cpl(struct kvm_vcpu *vcpu) static int vmx_get_cpl(struct kvm_vcpu *vcpu) { + struct vcpu_vmx *vmx = to_vmx(vcpu); + + /* + * If we enter real mode with cs.sel & 3 != 0, the normal CPL calculations + * fail; use the cache instead. + */ + if (unlikely(vmx->emulation_required && emulate_invalid_guest_state)) { + return vmx->cpl; + } + if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) { __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail); - to_vmx(vcpu)->cpl = __vmx_get_cpl(vcpu); + vmx->cpl = __vmx_get_cpl(vcpu); } - return to_vmx(vcpu)->cpl; + + return vmx->cpl; } From 62046e5a867cbff35e0beff42718dda41ff5d74b Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 7 Jun 2012 14:07:48 +0300 Subject: [PATCH 45/93] KVM: Split cpuid register access from computation Introduce kvm_cpuid() to perform the leaf limit check and calculate register values, and let kvm_emulate_cpuid() just handle reading and writing the registers from/to the vcpu. This allows us to reuse kvm_cpuid() in a context where directly reading and writing registers is not desired. Signed-off-by: Avi Kivity --- arch/x86/kvm/cpuid.c | 40 ++++++++++++++++++++++------------------ arch/x86/kvm/cpuid.h | 1 + 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 61ccbdf3d0ac..197afd53e3a4 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -640,33 +640,37 @@ static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu, return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index); } -void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) +void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) { - u32 function, index; + u32 function = *eax, index = *ecx; struct kvm_cpuid_entry2 *best; - function = kvm_register_read(vcpu, VCPU_REGS_RAX); - index = kvm_register_read(vcpu, VCPU_REGS_RCX); - kvm_register_write(vcpu, VCPU_REGS_RAX, 0); - kvm_register_write(vcpu, VCPU_REGS_RBX, 0); - kvm_register_write(vcpu, VCPU_REGS_RCX, 0); - kvm_register_write(vcpu, VCPU_REGS_RDX, 0); best = kvm_find_cpuid_entry(vcpu, function, index); if (!best) best = check_cpuid_limit(vcpu, function, index); if (best) { - kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax); - kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx); - kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx); - kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx); - } + *eax = best->eax; + *ebx = best->ebx; + *ecx = best->ecx; + *edx = best->edx; + } else + *eax = *ebx = *ecx = *edx = 0; +} + +void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) +{ + u32 function, eax, ebx, ecx, edx; + + function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX); + ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); + kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx); + kvm_register_write(vcpu, VCPU_REGS_RAX, eax); + kvm_register_write(vcpu, VCPU_REGS_RBX, ebx); + 
kvm_register_write(vcpu, VCPU_REGS_RCX, ecx); + kvm_register_write(vcpu, VCPU_REGS_RDX, edx); kvm_x86_ops->skip_emulated_instruction(vcpu); - trace_kvm_cpuid(function, - kvm_register_read(vcpu, VCPU_REGS_RAX), - kvm_register_read(vcpu, VCPU_REGS_RBX), - kvm_register_read(vcpu, VCPU_REGS_RCX), - kvm_register_read(vcpu, VCPU_REGS_RDX)); + trace_kvm_cpuid(function, eax, ebx, ecx, edx); } EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 26d1fb437eb5..f449edc35e24 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -17,6 +17,7 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries); +void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) From 0017f93a2776597b798ec1a9594e41dfd96d3c11 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 7 Jun 2012 14:10:16 +0300 Subject: [PATCH 46/93] KVM: x86 emulator: change ->get_cpuid() accessor to use the x86 semantics Instead of getting an exact leaf, follow the spec and fall back to the last main leaf instead. This lets us easily emulate the cpuid instruction in the emulator. Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_emulate.h | 4 +-- arch/x86/kvm/emulate.c | 51 +++++++++++++++--------------- arch/x86/kvm/x86.c | 20 ++---------- 3 files changed, 29 insertions(+), 46 deletions(-) diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 1ac46c22dd50..cd5c96b2496e 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -192,8 +192,8 @@ struct x86_emulate_ops { struct x86_instruction_info *info, enum x86_intercept_stage stage); - bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt, - u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); + void (*get_cpuid)(struct x86_emulate_ctxt *ctxt, + u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); }; typedef u32 __attribute__((vector_size(16))) sse128_t; diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index f95d242ee9f7..ba1f8ecaab52 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1993,8 +1993,8 @@ static bool vendor_intel(struct x86_emulate_ctxt *ctxt) u32 eax, ebx, ecx, edx; eax = ecx = 0; - return ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx) - && ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx + ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); + return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx; } @@ -2013,32 +2013,31 @@ static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) eax = 0x00000000; ecx = 0x00000000; - if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) { - /* - * Intel ("GenuineIntel") - * remark: Intel CPUs only support "syscall" in 64bit - * longmode. Also an 64bit guest with a - * 32bit compat-app running will #UD !! While this - * behaviour can be fixed (by emulating) into AMD - * response - CPUs of AMD can't behave like Intel. - */ - if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && - ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && - edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx) - return false; + ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); + /* + * Intel ("GenuineIntel") + * remark: Intel CPUs only support "syscall" in 64bit + * longmode. Also an 64bit guest with a + * 32bit compat-app running will #UD !! 
While this + * behaviour can be fixed (by emulating) into AMD + * response - CPUs of AMD can't behave like Intel. + */ + if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && + ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && + edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx) + return false; - /* AMD ("AuthenticAMD") */ - if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && - ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx && - edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) - return true; + /* AMD ("AuthenticAMD") */ + if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && + ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx && + edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) + return true; - /* AMD ("AMDisbetter!") */ - if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx && - ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx && - edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) - return true; + /* AMD ("AMDisbetter!") */ + if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx && + ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx && + edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) + return true; /* default: (not Intel, not AMD), apply Intel's stricter rules... */ return false; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8eacb2e64560..ff0b487e7254 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4302,26 +4302,10 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt, return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); } -static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, +static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) { - struct kvm_cpuid_entry2 *cpuid = NULL; - - if (eax && ecx) - cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt), - *eax, *ecx); - - if (cpuid) { - *eax = cpuid->eax; - *ecx = cpuid->ecx; - if (ebx) - *ebx = cpuid->ebx; - if (edx) - *edx = cpuid->edx; - return true; - } - - return false; + kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx); } static struct x86_emulate_ops emulate_ops = { From 6d6eede4a0492c7478d44d7c8fae80c3bcf529d9 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 7 Jun 2012 14:11:36 +0300 Subject: [PATCH 47/93] KVM: x86 emulator: emulate cpuid Opcode 0F A2. Used by Linux during the mode change trampoline while in a state that is not virtualizable on vmx without unrestricted_guest, so we need to emulate it if emulate_invalid_guest_state=1.
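The register convention being emulated is the architected one: the leaf is passed in EAX (and the subleaf in ECX), and the result comes back in EAX, EBX, ECX and EDX. A small userspace sketch of that convention, assuming an x86 host and GCC-style inline asm (illustration only, not the emulator code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Leaf in EAX, subleaf in ECX; results in EAX/EBX/ECX/EDX. */
static void cpuid(uint32_t leaf, uint32_t subleaf,
                  uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    *eax = leaf;
    *ecx = subleaf;
    asm volatile("cpuid"
                 : "+a"(*eax), "=b"(*ebx), "+c"(*ecx), "=d"(*edx));
}

int main(void)
{
    uint32_t eax, ebx, ecx, edx;
    char vendor[13];

    cpuid(0, 0, &eax, &ebx, &ecx, &edx);   /* leaf 0: max leaf + vendor id */
    memcpy(vendor, &ebx, 4);
    memcpy(vendor + 4, &edx, 4);
    memcpy(vendor + 8, &ecx, 4);
    vendor[12] = '\0';
    printf("max leaf %u, vendor %s\n", eax, vendor);
    return 0;
}

em_cpuid() does the same register shuffling on ctxt->regs[] and defers the actual lookup to ->get_cpuid().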
Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index ba1f8ecaab52..db95a55d5936 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -3142,6 +3142,20 @@ static int em_bsr(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } +static int em_cpuid(struct x86_emulate_ctxt *ctxt) +{ + u32 eax, ebx, ecx, edx; + + eax = ctxt->regs[VCPU_REGS_RAX]; + ecx = ctxt->regs[VCPU_REGS_RCX]; + ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); + ctxt->regs[VCPU_REGS_RAX] = eax; + ctxt->regs[VCPU_REGS_RBX] = ebx; + ctxt->regs[VCPU_REGS_RCX] = ecx; + ctxt->regs[VCPU_REGS_RDX] = edx; + return X86EMUL_CONTINUE; +} + static bool valid_cr(int nr) { switch (nr) { @@ -3634,7 +3648,7 @@ static struct opcode twobyte_table[256] = { X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)), /* 0xA0 - 0xA7 */ I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg), - DI(ImplicitOps, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt), + II(ImplicitOps, em_cpuid, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt), D(DstMem | SrcReg | Src2ImmByte | ModRM), D(DstMem | SrcReg | Src2CL | ModRM), N, N, /* 0xA8 - 0xAF */ From 79d5b4c3cd809c770d4bf9812635647016c56011 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 7 Jun 2012 17:03:42 +0300 Subject: [PATCH 48/93] KVM: x86 emulator: allow loading null SS in long mode Null SS is valid in long mode; allow loading it. Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index db95a55d5936..fe4340f62137 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1324,8 +1324,14 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, goto load; } - /* NULL selector is not valid for TR, CS and SS */ - if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR) + rpl = selector & 3; + cpl = ctxt->ops->cpl(ctxt); + + /* NULL selector is not valid for TR, CS and SS (except for long mode) */ + if ((seg == VCPU_SREG_CS + || (seg == VCPU_SREG_SS + && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)) + || seg == VCPU_SREG_TR) && null_selector) goto exception; @@ -1352,9 +1358,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, goto exception; } - rpl = selector & 3; dpl = seg_desc.dpl; - cpl = ctxt->ops->cpl(ctxt); switch (seg) { case VCPU_SREG_SS: From 510425ff3344df03a1f94bce49e659ae302e0d34 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 7 Jun 2012 17:04:36 +0300 Subject: [PATCH 49/93] KVM: x86 emulator: fix LIDT/LGDT in long mode The operand size for these instructions is 8 bytes in long mode, even without a REX prefix. Set it explicitly. Triggered while booting Linux with emulate_invalid_guest_state=1. 
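The operand size matters because of the pseudo-descriptor layout these instructions read from memory: a 16-bit limit followed by a base that is 32 bits wide outside long mode but 64 bits wide in long mode. A minimal sketch of the two layouts (the struct names are invented for the example):

#include <stdint.h>
#include <stdio.h>

struct desc_ptr32 {            /* LGDT/LIDT operand outside long mode */
    uint16_t limit;
    uint32_t base;
} __attribute__((packed));

struct desc_ptr64 {            /* LGDT/LIDT operand in long mode */
    uint16_t limit;
    uint64_t base;
} __attribute__((packed));

int main(void)
{
    printf("legacy pseudo-descriptor:    %zu bytes\n", sizeof(struct desc_ptr32)); /* 6 */
    printf("long mode pseudo-descriptor: %zu bytes\n", sizeof(struct desc_ptr64)); /* 10 */
    return 0;
}

Forcing ctxt->op_bytes to 8 makes read_descriptor() fetch the full 64-bit base.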
Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index fe4340f62137..24c84251648b 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2997,6 +2997,8 @@ static int em_lgdt(struct x86_emulate_ctxt *ctxt) struct desc_ptr desc_ptr; int rc; + if (ctxt->mode == X86EMUL_MODE_PROT64) + ctxt->op_bytes = 8; rc = read_descriptor(ctxt, ctxt->src.addr.mem, &desc_ptr.size, &desc_ptr.address, ctxt->op_bytes); @@ -3024,6 +3026,8 @@ static int em_lidt(struct x86_emulate_ctxt *ctxt) struct desc_ptr desc_ptr; int rc; + if (ctxt->mode == X86EMUL_MODE_PROT64) + ctxt->op_bytes = 8; rc = read_descriptor(ctxt, ctxt->src.addr.mem, &desc_ptr.size, &desc_ptr.address, ctxt->op_bytes); From f0495f9b9992f80f82b14306946444b287193390 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 7 Jun 2012 17:06:10 +0300 Subject: [PATCH 50/93] KVM: VMX: Relax check on unusable segment Some userspace (e.g. QEMU 1.1) munge the d and g bits of segment descriptors, causing us not to recognize them as unusable segments with emulate_invalid_guest_state=1. Relax the check by testing for segment not present (a non-present segment cannot be usable). Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 486db2f9561b..82ab1fb2683e 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3198,7 +3198,7 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var) { u32 ar; - if (var->unusable) + if (var->unusable || !var->present) ar = 1 << 16; else { ar = var->type & 15; @@ -3210,8 +3210,6 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var) ar |= (var->db & 1) << 14; ar |= (var->g & 1) << 15; } - if (ar == 0) /* a 0 value means unusable */ - ar = AR_UNUSABLE_MASK; return ar; } From b8405c184b4ef3abcebc5cf2211215944d6e2acc Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 7 Jun 2012 17:08:48 +0300 Subject: [PATCH 51/93] KVM: VMX: Limit iterations with emulator_invalid_guest_state Otherwise, if the guest ends up looping, we never exit the srcu critical section, which causes synchronize_srcu() to hang. Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 82ab1fb2683e..debac4984347 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4977,11 +4977,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) int ret = 1; u32 cpu_exec_ctrl; bool intr_window_requested; + unsigned count = 130; cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING; - while (!guest_state_valid(vcpu)) { + while (!guest_state_valid(vcpu) && count-- != 0) { if (intr_window_requested && (kvm_get_rflags(&vmx->vcpu) & X86_EFLAGS_IF)) return handle_interrupt_window(&vmx->vcpu); From f47cfa3174ad8bd39e56524b36e79c463bf820b1 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 7 Jun 2012 17:49:24 +0300 Subject: [PATCH 52/93] KVM: x86 emulator: emulate LEAVE Opcode c9; used by some variants of Windows during boot, in big real mode. 
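LEAVE tears down the current stack frame: the frame pointer is copied into the stack pointer and the caller's saved frame pointer is popped back. A toy userspace model of that sequence (struct cpu_model and the 8-byte-granular stack array are assumptions made for the example, not emulator data structures):

#include <stdint.h>
#include <stdio.h>

struct cpu_model {
    uint64_t rsp, rbp;
    uint64_t stack[64];        /* stack[i] models memory at address 8*i */
};

static uint64_t pop64(struct cpu_model *c)
{
    uint64_t val = c->stack[c->rsp / 8];
    c->rsp += 8;
    return val;
}

/* LEAVE: RSP <- RBP, then RBP <- pop() */
static void leave(struct cpu_model *c)
{
    c->rsp = c->rbp;
    c->rbp = pop64(c);
}

int main(void)
{
    struct cpu_model c = { .rsp = 8 * 4, .rbp = 8 * 6 };

    c.stack[6] = 0xdeadbeef;   /* saved RBP pushed by the prologue */
    leave(&c);
    printf("rsp=%llu rbp=%#llx\n",
           (unsigned long long)c.rsp, (unsigned long long)c.rbp);
    /* rsp=56 rbp=0xdeadbeef */
    return 0;
}

em_leave() mirrors this with assign_masked() and emulate_pop(), so the 16/32/64-bit stack width is respected.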
Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 24c84251648b..33ccd757cb11 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -433,11 +433,27 @@ static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, return ctxt->ops->intercept(ctxt, &info, stage); } +static void assign_masked(ulong *dest, ulong src, ulong mask) +{ + *dest = (*dest & ~mask) | (src & mask); +} + static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt) { return (1UL << (ctxt->ad_bytes << 3)) - 1; } +static ulong stack_mask(struct x86_emulate_ctxt *ctxt) +{ + u16 sel; + struct desc_struct ss; + + if (ctxt->mode == X86EMUL_MODE_PROT64) + return ~0UL; + ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS); + return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */ +} + /* Access/update address held in a register, based on addressing mode. */ static inline unsigned long address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg) @@ -1560,6 +1576,13 @@ static int em_popf(struct x86_emulate_ctxt *ctxt) return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes); } +static int em_leave(struct x86_emulate_ctxt *ctxt) +{ + assign_masked(&ctxt->regs[VCPU_REGS_RSP], ctxt->regs[VCPU_REGS_RBP], + stack_mask(ctxt)); + return emulate_pop(ctxt, &ctxt->regs[VCPU_REGS_RBP], ctxt->op_bytes); +} + static int em_push_sreg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; @@ -3582,7 +3605,7 @@ static struct opcode opcode_table[256] = { I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg), G(ByteOp, group11), G(0, group11), /* 0xC8 - 0xCF */ - N, N, N, I(ImplicitOps | Stack, em_ret_far), + N, I(Stack, em_leave), N, I(ImplicitOps | Stack, em_ret_far), D(ImplicitOps), DI(SrcImmByte, intn), D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), /* 0xD0 - 0xD7 */ From cbd27ee783f1e56d56415e8c5f2492ccedd565c4 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 10 Jun 2012 17:11:00 +0300 Subject: [PATCH 53/93] KVM: x86 emulator: initialize memop memop is not initialized; this can lead to a two-byte operation following a 4-byte operation to see garbage values. Usually truncation fixes things fot us later on, but at least in one case (call abs) it doesn't. Fix by moving memop to the auto-initialized field area. Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_emulate.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index cd5c96b2496e..c764f43b71c5 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -280,9 +280,9 @@ struct x86_emulate_ctxt { u8 modrm_seg; bool rip_relative; unsigned long _eip; + struct operand memop; /* Fields above regs are cleared together. */ unsigned long regs[NR_VCPU_REGS]; - struct operand memop; struct operand *memopp; struct fetch_cache fetch; struct read_cache io_read; From a6e3407bb1570ac5d8d7fc471bca07d531d1dde7 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 10 Jun 2012 17:15:39 +0300 Subject: [PATCH 54/93] KVM: Fix SS default ESP/EBP based addressing We correctly default to SS when BP is used as a base in 16-bit address mode, but we don't do that for 32-bit mode. Fix by adjusting the default to SS when either ESP or EBP is used as the base register. 
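The rule being implemented is the architected default-segment selection: a memory operand whose base register is (E)SP or (E)BP is addressed through SS rather than DS, unless an explicit segment prefix overrides it. A compact sketch of that rule (the enum values are invented for the example; the kernel uses the VCPU_REGS_* and VCPU_SREG_* constants):

#include <stdio.h>

enum reg { REG_AX, REG_CX, REG_DX, REG_BX, REG_SP, REG_BP, REG_SI, REG_DI };
enum seg { SEG_DS, SEG_SS };

/* Default segment for a ModRM memory operand given its base register. */
static enum seg default_segment(enum reg base)
{
    return (base == REG_SP || base == REG_BP) ? SEG_SS : SEG_DS;
}

int main(void)
{
    printf("base=ebp -> %s\n", default_segment(REG_BP) == SEG_SS ? "ss" : "ds");
    printf("base=esi -> %s\n", default_segment(REG_SI) == SEG_SS ? "ss" : "ds");
    return 0;
}

adjust_modrm_seg() applies exactly this check when decoding 32-bit ModRM/SIB operands.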
Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 33ccd757cb11..7552c0ac6e76 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -974,6 +974,12 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt, op->orig_val = op->val; } +static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg) +{ + if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP) + ctxt->modrm_seg = VCPU_SREG_SS; +} + static int decode_modrm(struct x86_emulate_ctxt *ctxt, struct operand *op) { @@ -1077,15 +1083,20 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0) modrm_ea += insn_fetch(s32, ctxt); - else + else { modrm_ea += ctxt->regs[base_reg]; + adjust_modrm_seg(ctxt, base_reg); + } if (index_reg != 4) modrm_ea += ctxt->regs[index_reg] << scale; } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) { if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->rip_relative = 1; - } else - modrm_ea += ctxt->regs[ctxt->modrm_rm]; + } else { + base_reg = ctxt->modrm_rm; + modrm_ea += ctxt->regs[base_reg]; + adjust_modrm_seg(ctxt, base_reg); + } switch (ctxt->modrm_mod) { case 0: if (ctxt->modrm_rm == 5) From 96051572c819194c37a8367624b285be10297eca Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 10 Jun 2012 17:21:18 +0300 Subject: [PATCH 55/93] KVM: x86 emulator: emulate SGDT/SIDT Opcodes 0F 01 /0 and 0F 01 /1 Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 7552c0ac6e76..5053e9efb140 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -3026,6 +3026,35 @@ static int em_vmcall(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } +static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, + void (*get)(struct x86_emulate_ctxt *ctxt, + struct desc_ptr *ptr)) +{ + struct desc_ptr desc_ptr; + + if (ctxt->mode == X86EMUL_MODE_PROT64) + ctxt->op_bytes = 8; + get(ctxt, &desc_ptr); + if (ctxt->op_bytes == 2) { + ctxt->op_bytes = 4; + desc_ptr.address &= 0x00ffffff; + } + /* Disable writeback. */ + ctxt->dst.type = OP_NONE; + return segmented_write(ctxt, ctxt->dst.addr.mem, + &desc_ptr, 2 + ctxt->op_bytes); +} + +static int em_sgdt(struct x86_emulate_ctxt *ctxt) +{ + return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt); +} + +static int em_sidt(struct x86_emulate_ctxt *ctxt) +{ + return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt); +} + static int em_lgdt(struct x86_emulate_ctxt *ctxt) { struct desc_ptr desc_ptr; @@ -3485,8 +3514,8 @@ static struct opcode group6[] = { }; static struct group_dual group7 = { { - DI(Mov | DstMem | Priv, sgdt), - DI(Mov | DstMem | Priv, sidt), + II(Mov | DstMem | Priv, em_sgdt, sgdt), + II(Mov | DstMem | Priv, em_sidt, sidt), II(SrcMem | Priv, em_lgdt, lgdt), II(SrcMem | Priv, em_lidt, lidt), II(SrcNone | DstMem | Mov, em_smsw, smsw), N, From bdea48e305389b3c8c0f786a317f9da984c16604 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 10 Jun 2012 18:07:57 +0300 Subject: [PATCH 56/93] KVM: VMX: Fix interrupt exit condition during emulation Checking EFLAGS.IF is incorrect as we might be in interrupt shadow. If that is the case, the main loop will notice that and not inject the interrupt, causing an endless loop. 
Fix by using vmx_interrupt_allowed() to check if we can inject an interrupt instead. Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index debac4984347..6cdb40457695 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4983,8 +4983,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING; while (!guest_state_valid(vcpu) && count-- != 0) { - if (intr_window_requested - && (kvm_get_rflags(&vmx->vcpu) & X86_EFLAGS_IF)) + if (intr_window_requested && vmx_interrupt_allowed(vcpu)) return handle_interrupt_window(&vmx->vcpu); err = emulate_instruction(vcpu, 0); From 7c068e45587a83d4235dda55d35a7d305c58e0e5 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 10 Jun 2012 18:09:27 +0300 Subject: [PATCH 57/93] KVM: VMX: Continue emulating after batch exhausted If we return early from an invalid guest state emulation loop, make sure we return to it later if the guest state is still invalid. Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 6cdb40457695..2e51e7c6d2a8 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -5002,7 +5002,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) schedule(); } - vmx->emulation_required = 0; + vmx->emulation_required = !guest_state_valid(vcpu); out: return ret; } From 2dd7caa092f0b1200a885a418e5d33b222183a71 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 11 Jun 2012 13:09:07 +0300 Subject: [PATCH 58/93] KVM: x86 emulator: emulate LAHF Opcode 9F. Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 5053e9efb140..90b549ed8997 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -3227,6 +3227,13 @@ static int em_cpuid(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } +static int em_lahf(struct x86_emulate_ctxt *ctxt) +{ + ctxt->regs[VCPU_REGS_RAX] &= ~0xff00UL; + ctxt->regs[VCPU_REGS_RAX] |= (ctxt->eflags & 0xff) << 8; + return X86EMUL_CONTINUE; +} + static bool valid_cr(int nr) { switch (nr) { @@ -3622,7 +3629,7 @@ static struct opcode opcode_table[256] = { D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), I(SrcImmFAddr | No64, em_call_far), N, II(ImplicitOps | Stack, em_pushf, pushf), - II(ImplicitOps | Stack, em_popf, popf), N, N, + II(ImplicitOps | Stack, em_popf, popf), N, I(ImplicitOps, em_lahf), /* 0xA0 - 0xA7 */ I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov), From 361cad2b50a2c92b91b6f568db860fabad3bf149 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 11 Jun 2012 19:40:15 +0300 Subject: [PATCH 59/93] KVM: x86 emulator: fix byte-sized MOVZX/MOVSX Commit 2adb5ad9fe1 removed ByteOp from MOVZX/MOVSX, replacing them by SrcMem8, but neglected to fix the dependency in the emulation code on ByteOp. This caused the instruction not to have any effect in some circumstances. Fix by replacing the check for ByteOp with the equivalent src.op_bytes == 1. 
Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 90b549ed8997..30f4912c6a67 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -4517,12 +4517,12 @@ twobyte_insn: break; case 0xb6 ... 0xb7: /* movzx */ ctxt->dst.bytes = ctxt->op_bytes; - ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val + ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val : (u16) ctxt->src.val; break; case 0xbe ... 0xbf: /* movsx */ ctxt->dst.bytes = ctxt->op_bytes; - ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val : + ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val : (s16) ctxt->src.val; break; case 0xc0 ... 0xc1: /* xadd */ From 51ddff50cbd77568fe40e17a966b3a2ef1231b36 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 12 Jun 2012 20:19:40 +0300 Subject: [PATCH 60/93] KVM: x86 emulator: split push logic from push opcode emulation This allows us to reuse the code without populating ctxt->src and overriding ctxt->op_bytes. Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 30f4912c6a67..acc647d63701 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1505,17 +1505,22 @@ static int writeback(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } -static int em_push(struct x86_emulate_ctxt *ctxt) +static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes) { struct segmented_address addr; - register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes); + register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes); addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]); addr.seg = VCPU_SREG_SS; + return segmented_write(ctxt, addr, data, bytes); +} + +static int em_push(struct x86_emulate_ctxt *ctxt) +{ /* Disable writeback. */ ctxt->dst.type = OP_NONE; - return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes); + return push(ctxt, &ctxt->src.val, ctxt->op_bytes); } static int emulate_pop(struct x86_emulate_ctxt *ctxt, From 612e89f01569f562dfa76cd5b76310a42b34a214 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 12 Jun 2012 20:03:23 +0300 Subject: [PATCH 61/93] KVM: x86 emulator: implement ENTER Opcode C8. Only ENTER with lexical nesting depth 0 is implemented, since others are very rare. We'll fail emulation if nonzero lexical depth is used so data is not corrupted. Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index acc647d63701..b4b326ebc835 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -454,6 +454,11 @@ static ulong stack_mask(struct x86_emulate_ctxt *ctxt) return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */ } +static int stack_size(struct x86_emulate_ctxt *ctxt) +{ + return (__fls(stack_mask(ctxt)) + 1) >> 3; +} + /* Access/update address held in a register, based on addressing mode. 
*/ static inline unsigned long address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg) @@ -1592,6 +1597,26 @@ static int em_popf(struct x86_emulate_ctxt *ctxt) return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes); } +static int em_enter(struct x86_emulate_ctxt *ctxt) +{ + int rc; + unsigned frame_size = ctxt->src.val; + unsigned nesting_level = ctxt->src2.val & 31; + + if (nesting_level) + return X86EMUL_UNHANDLEABLE; + + rc = push(ctxt, &ctxt->regs[VCPU_REGS_RBP], stack_size(ctxt)); + if (rc != X86EMUL_CONTINUE) + return rc; + assign_masked(&ctxt->regs[VCPU_REGS_RBP], ctxt->regs[VCPU_REGS_RSP], + stack_mask(ctxt)); + assign_masked(&ctxt->regs[VCPU_REGS_RSP], + ctxt->regs[VCPU_REGS_RSP] - frame_size, + stack_mask(ctxt)); + return X86EMUL_CONTINUE; +} + static int em_leave(struct x86_emulate_ctxt *ctxt) { assign_masked(&ctxt->regs[VCPU_REGS_RSP], ctxt->regs[VCPU_REGS_RBP], @@ -3657,7 +3682,8 @@ static struct opcode opcode_table[256] = { I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg), G(ByteOp, group11), G(0, group11), /* 0xC8 - 0xCF */ - N, I(Stack, em_leave), N, I(ImplicitOps | Stack, em_ret_far), + I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave), + N, I(ImplicitOps | Stack, em_ret_far), D(ImplicitOps), DI(SrcImmByte, intn), D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), /* 0xD0 - 0xD7 */ From de87dcddc70ec6a90adfcc81f0ad7d84a892ffce Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 12 Jun 2012 20:21:38 +0300 Subject: [PATCH 62/93] KVM: VMX: Stop invalid guest state emulation on pending event Process the event, possibly injecting an interrupt, before continuing. Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 2e51e7c6d2a8..a62f92ab1be2 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4986,6 +4986,9 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) if (intr_window_requested && vmx_interrupt_allowed(vcpu)) return handle_interrupt_window(&vmx->vcpu); + if (test_bit(KVM_REQ_EVENT, &vcpu->requests)) + return 1; + err = emulate_instruction(vcpu, 0); if (err == EMULATE_DO_MMIO) { From de5f70e0c65fcd0472a412a7a9690afcd3ee4526 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 12 Jun 2012 20:22:28 +0300 Subject: [PATCH 63/93] KVM: VMX: Improve error reporting during invalid guest state emulation If instruction emulation fails, report it properly to userspace. Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a62f92ab1be2..c61eb34a39e7 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4996,8 +4996,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) goto out; } - if (err != EMULATE_DONE) + if (err != EMULATE_DONE) { + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; + vcpu->run->internal.ndata = 0; return 0; + } if (signal_pending(current)) goto out; From 9299836e6379d5703826a540fb3c704223fac520 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 13 Jun 2012 12:25:06 +0300 Subject: [PATCH 64/93] KVM: x86 emulator: emulate BSWAP Opcodes 0F C8 - 0F CF. Used by the SeaBIOS cdrom code (though not in big real mode). 
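BSWAP reverses the byte order of the destination register; the emulator simply runs the host instruction on the operand via inline asm. For reference, the same operation written in portable C (bswap32() is an illustrative helper, not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint32_t bswap32(uint32_t x)
{
    return (x >> 24) | ((x >> 8) & 0x0000ff00) |
           ((x << 8) & 0x00ff0000) | (x << 24);
}

int main(void)
{
    printf("%#x\n", bswap32(0x12345678));   /* 0x78563412 */
    return 0;
}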
Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index b4b326ebc835..cfa5cc30c1d9 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -3264,6 +3264,21 @@ static int em_lahf(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } +static int em_bswap(struct x86_emulate_ctxt *ctxt) +{ + switch (ctxt->op_bytes) { +#ifdef CONFIG_X86_64 + case 8: + asm("bswap %0" : "+r"(ctxt->dst.val)); + break; +#endif + default: + asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val)); + break; + } + return X86EMUL_CONTINUE; +} + static bool valid_cr(int nr) { switch (nr) { @@ -3780,11 +3795,12 @@ static struct opcode twobyte_table[256] = { I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc), I(DstReg | SrcMem | ModRM, em_bsf), I(DstReg | SrcMem | ModRM, em_bsr), D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), - /* 0xC0 - 0xCF */ + /* 0xC0 - 0xC7 */ D2bv(DstMem | SrcReg | ModRM | Lock), N, D(DstMem | SrcReg | ModRM | Mov), N, N, N, GD(0, &group9), - N, N, N, N, N, N, N, N, + /* 0xC8 - 0xCF */ + X8(I(DstReg, em_bswap)), /* 0xD0 - 0xDF */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, /* 0xE0 - 0xEF */ From a14e579f224ba929fe2f1d9bbbff688ae67e2ec4 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 13 Jun 2012 12:28:33 +0300 Subject: [PATCH 65/93] KVM: x86 emulator: emulate LLDT Opcode 0F 00 /2. Used by isolinux durign the protected mode transition. Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index cfa5cc30c1d9..7b575adaf1fd 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -3015,6 +3015,15 @@ static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); } +static int em_lldt(struct x86_emulate_ctxt *ctxt) +{ + u16 sel = ctxt->src.val; + + /* Disable writeback. */ + ctxt->dst.type = OP_NONE; + return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR); +} + static int em_invlpg(struct x86_emulate_ctxt *ctxt) { int rc; @@ -3560,7 +3569,7 @@ static struct opcode group5[] = { static struct opcode group6[] = { DI(Prot, sldt), DI(Prot, str), - DI(Prot | Priv, lldt), + II(Prot | Priv | SrcMem16, em_lldt, lldt), DI(Prot | Priv, ltr), N, N, N, N, }; From e919464b53ea29aed46ff10f7d6416268678bdb9 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 13 Jun 2012 16:29:39 +0300 Subject: [PATCH 66/93] KVM: x86 emulator: make read_segment_descriptor() return the address Some operations want to modify the descriptor later on, so save the address for future use. 
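The address in question is straightforward to recompute: bits 15:3 of the selector index the descriptor table in 8-byte units, so the slot sits at the table base plus index times 8. A small sketch of the calculation (struct desc_table and the sample selector are invented for the example):

#include <stdint.h>
#include <stdio.h>

struct desc_table {
    uint64_t base;
    uint16_t limit;
};

/* Selector layout: bits 1:0 RPL, bit 2 table indicator, bits 15:3 index. */
static uint64_t descriptor_addr(const struct desc_table *dt, uint16_t selector)
{
    return dt->base + (uint64_t)(selector >> 3) * 8;
}

int main(void)
{
    struct desc_table dt = { .base = 0x1000, .limit = 0xff };
    uint16_t sel = 0x28;    /* index 5, GDT, RPL 0 */

    printf("descriptor at %#llx\n",
           (unsigned long long)descriptor_addr(&dt, sel));  /* 0x1028 */
    return 0;
}

Returning desc_addr from read_segment_descriptor() lets a caller write the (possibly modified) descriptor back to that same location.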
Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 7b575adaf1fd..99e3df2bf880 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1296,7 +1296,8 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, /* allowed just for 8 bytes segments */ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, - u16 selector, struct desc_struct *desc) + u16 selector, struct desc_struct *desc, + ulong *desc_addr_p) { struct desc_ptr dt; u16 index = selector >> 3; @@ -1307,7 +1308,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, if (dt.size < index * 8 + 7) return emulate_gp(ctxt, selector & 0xfffc); - addr = dt.address + index * 8; + *desc_addr_p = addr = dt.address + index * 8; return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); } @@ -1339,6 +1340,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, unsigned err_vec = GP_VECTOR; u32 err_code = 0; bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */ + ulong desc_addr; int ret; memset(&seg_desc, 0, sizeof seg_desc); @@ -1374,7 +1376,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, if (null_selector) /* for NULL selector skip all following checks */ goto load; - ret = read_segment_descriptor(ctxt, selector, &seg_desc); + ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; @@ -2614,13 +2616,14 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, ulong old_tss_base = ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); u32 desc_limit; + ulong desc_addr; /* FIXME: old_tss_base == ~0 ? */ - ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc); + ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; - ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); + ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; From 869be99c7579c857885643ba2aed87ced339c6a2 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 13 Jun 2012 16:30:53 +0300 Subject: [PATCH 67/93] KVM: x86 emulator: make loading TR set the busy bit Guest software doesn't actually depend on it, but vmx will refuse us entry if we don't. Set the bit in both the cached segment and memory, just to be nice. 
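Architecturally an available TSS descriptor has type 9 and a busy one has type 11, so marking it busy just means setting bit 1 of the type field, which is what the seg_desc.type |= 2 followed by the cmpxchg write-back does. A trivial standalone illustration:

#include <stdint.h>
#include <stdio.h>

#define TSS_TYPE_AVAILABLE 9    /* available (32/64-bit) TSS */
#define TSS_TYPE_BUSY      11   /* busy TSS */

int main(void)
{
    uint8_t type = TSS_TYPE_AVAILABLE;

    type |= 2;      /* side effect of loading TR */
    printf("type %u -> busy? %s\n", (unsigned)type,
           type == TSS_TYPE_BUSY ? "yes" : "no");
    return 0;
}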
Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 99e3df2bf880..92a1adde0b44 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1335,7 +1335,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg) { - struct desc_struct seg_desc; + struct desc_struct seg_desc, old_desc; u8 dpl, rpl, cpl; unsigned err_vec = GP_VECTOR; u32 err_code = 0; @@ -1422,6 +1422,12 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, case VCPU_SREG_TR: if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9)) goto exception; + old_desc = seg_desc; + seg_desc.type |= 2; /* busy */ + ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, + sizeof(seg_desc), &ctxt->exception); + if (ret != X86EMUL_CONTINUE) + return ret; break; case VCPU_SREG_LDTR: if (seg_desc.s || seg_desc.type != 2) From 80890006167ec3e570bfd7cee7a05db17d339726 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 13 Jun 2012 16:33:29 +0300 Subject: [PATCH 68/93] KVM: x86 emulator: implement LTR Opcode 0F 00 /3. Encountered during Windows XP secondary processor bringup. Signed-off-by: Avi Kivity --- arch/x86/kvm/emulate.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 92a1adde0b44..97d9a9914ba8 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -3033,6 +3033,15 @@ static int em_lldt(struct x86_emulate_ctxt *ctxt) return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR); } +static int em_ltr(struct x86_emulate_ctxt *ctxt) +{ + u16 sel = ctxt->src.val; + + /* Disable writeback. */ + ctxt->dst.type = OP_NONE; + return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR); +} + static int em_invlpg(struct x86_emulate_ctxt *ctxt) { int rc; @@ -3579,7 +3588,7 @@ static struct opcode group6[] = { DI(Prot, sldt), DI(Prot, str), II(Prot | Priv | SrcMem16, em_lldt, lldt), - DI(Prot | Priv, ltr), + II(Prot | Priv | SrcMem16, em_ltr, ltr), N, N, N, N, }; From a27685c33acccce91268ddef88d7896e3205fda5 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 12 Jun 2012 20:30:18 +0300 Subject: [PATCH 69/93] KVM: VMX: Emulate invalid guest state by default Our emulation should be complete enough that we can emulate guests while they are in big real mode, or in a mode transition that is not virtualizable without unrestricted guest support. 
Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index c61eb34a39e7..a2b9dd9af620 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -74,7 +74,7 @@ module_param_named(unrestricted_guest, static bool __read_mostly enable_ept_ad_bits = 1; module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO); -static bool __read_mostly emulate_invalid_guest_state = 0; +static bool __read_mostly emulate_invalid_guest_state = true; module_param(emulate_invalid_guest_state, bool, S_IRUGO); static bool __read_mostly vmm_exclusive = 1; From 2f84569f978cd7d54970d893b4c4e68ef24dc1ec Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Jun 2012 15:56:53 +0800 Subject: [PATCH 70/93] KVM: MMU: return bool in __rmap_write_protect The reture value of __rmap_write_protect is either 1 or 0, use true/false instead of these Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 569cd66ba24c..5dd22427251d 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1050,11 +1050,12 @@ static void drop_spte(struct kvm *kvm, u64 *sptep) rmap_remove(kvm, sptep); } -static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level) +static bool +__rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level) { u64 *sptep; struct rmap_iterator iter; - int write_protected = 0; + bool write_protected = false; for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { BUG_ON(!(*sptep & PT_PRESENT_MASK)); @@ -1075,7 +1076,7 @@ static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level sptep = rmap_get_first(*rmapp, &iter); } - write_protected = 1; + write_protected = true; } return write_protected; @@ -1106,12 +1107,12 @@ void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, } } -static int rmap_write_protect(struct kvm *kvm, u64 gfn) +static bool rmap_write_protect(struct kvm *kvm, u64 gfn) { struct kvm_memory_slot *slot; unsigned long *rmapp; int i; - int write_protected = 0; + bool write_protected = false; slot = gfn_to_memslot(kvm, gfn); @@ -1700,7 +1701,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu, kvm_mmu_pages_init(parent, &parents, &pages); while (mmu_unsync_walk(parent, &pages)) { - int protected = 0; + bool protected = false; for_each_sp(pages, sp, parents, i) protected |= rmap_write_protect(vcpu->kvm, sp->gfn); From d13bc5b5a1f9eafd59331baa1d1d32e1867f57b5 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Jun 2012 15:57:15 +0800 Subject: [PATCH 71/93] KVM: MMU: abstract spte write-protect Introduce a common function to abstract spte write-protect to cleanup the code Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 58 +++++++++++++++++++++++++--------------------- 1 file changed, 31 insertions(+), 27 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 5dd22427251d..d04d6305a725 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1050,36 +1050,48 @@ static void drop_spte(struct kvm *kvm, u64 *sptep) rmap_remove(kvm, sptep); } +/* Return true if the spte is dropped. 
*/ +static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush) +{ + u64 spte = *sptep; + + if (!is_writable_pte(spte)) + return false; + + rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); + + *flush |= true; + if (is_large_pte(spte)) { + WARN_ON(page_header(__pa(sptep))->role.level == + PT_PAGE_TABLE_LEVEL); + drop_spte(kvm, sptep); + --kvm->stat.lpages; + return true; + } + + spte = spte & ~PT_WRITABLE_MASK; + mmu_spte_update(sptep, spte); + return false; +} + static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level) { u64 *sptep; struct rmap_iterator iter; - bool write_protected = false; + bool flush = false; for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { BUG_ON(!(*sptep & PT_PRESENT_MASK)); - rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); - - if (!is_writable_pte(*sptep)) { - sptep = rmap_get_next(&iter); + if (spte_write_protect(kvm, sptep, &flush)) { + sptep = rmap_get_first(*rmapp, &iter); continue; } - if (level == PT_PAGE_TABLE_LEVEL) { - mmu_spte_update(sptep, *sptep & ~PT_WRITABLE_MASK); - sptep = rmap_get_next(&iter); - } else { - BUG_ON(!is_large_pte(*sptep)); - drop_spte(kvm, sptep); - --kvm->stat.lpages; - sptep = rmap_get_first(*rmapp, &iter); - } - - write_protected = true; + sptep = rmap_get_next(&iter); } - return write_protected; + return flush; } /** @@ -3886,6 +3898,7 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu) void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) { struct kvm_mmu_page *sp; + bool flush = false; list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) { int i; @@ -3900,16 +3913,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) !is_last_spte(pt[i], sp->role.level)) continue; - if (is_large_pte(pt[i])) { - drop_spte(kvm, &pt[i]); - --kvm->stat.lpages; - continue; - } - - /* avoid RMW */ - if (is_writable_pte(pt[i])) - mmu_spte_update(&pt[i], - pt[i] & ~PT_WRITABLE_MASK); + spte_write_protect(kvm, &pt[i], &flush); } } kvm_flush_remote_tlbs(kvm); From 8e22f955fb65c5930cc4c5a863cce4e27d0e4a3c Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Jun 2012 15:57:39 +0800 Subject: [PATCH 72/93] KVM: MMU: cleanup spte_write_protect Use __drop_large_spte to cleanup this function and comment spte_write_protect Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 45 +++++++++++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index d04d6305a725..ed9e96806082 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1050,7 +1050,33 @@ static void drop_spte(struct kvm *kvm, u64 *sptep) rmap_remove(kvm, sptep); } -/* Return true if the spte is dropped. */ + +static bool __drop_large_spte(struct kvm *kvm, u64 *sptep) +{ + if (is_large_pte(*sptep)) { + WARN_ON(page_header(__pa(sptep))->role.level == + PT_PAGE_TABLE_LEVEL); + drop_spte(kvm, sptep); + --kvm->stat.lpages; + return true; + } + + return false; +} + +static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) +{ + if (__drop_large_spte(vcpu->kvm, sptep)) + kvm_flush_remote_tlbs(vcpu->kvm); +} + +/* + * Write-protect on the specified @sptep due to dirty page logging or + * protecting shadow page table. @flush indicates whether tlb need be + * flushed. + * + * Return true if the spte is dropped. 
+ */ static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush) { u64 spte = *sptep; @@ -1061,13 +1087,9 @@ static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush) rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); *flush |= true; - if (is_large_pte(spte)) { - WARN_ON(page_header(__pa(sptep))->role.level == - PT_PAGE_TABLE_LEVEL); - drop_spte(kvm, sptep); - --kvm->stat.lpages; + + if (__drop_large_spte(kvm, sptep)) return true; - } spte = spte & ~PT_WRITABLE_MASK; mmu_spte_update(sptep, spte); @@ -1878,15 +1900,6 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp) mmu_spte_set(sptep, spte); } -static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) -{ - if (is_large_pte(*sptep)) { - drop_spte(vcpu->kvm, sptep); - --vcpu->kvm->stat.lpages; - kvm_flush_remote_tlbs(vcpu->kvm); - } -} - static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned direct_access) { From 4f5982a56a70a4a8b7966ef458d3fcdd27aa16cf Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Jun 2012 15:58:04 +0800 Subject: [PATCH 73/93] KVM: VMX: export PFEC.P bit on ept Export the present bit of page fault error code, the later patch will use it Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a2b9dd9af620..5c52a6d29908 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4838,6 +4838,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; gpa_t gpa; + u32 error_code; int gla_validity; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); @@ -4862,7 +4863,13 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); trace_kvm_page_fault(gpa, exit_qualification); - return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3, NULL, 0); + + /* It is a write fault? */ + error_code = exit_qualification & (1U << 1); + /* ept page table is present? */ + error_code |= (exit_qualification >> 3) & 0x1; + + return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); } static u64 ept_rsvd_mask(u64 spte, int level) From 6e7d035407dc402a313e466c4f7ccb21aaed0da2 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Jun 2012 15:58:33 +0800 Subject: [PATCH 74/93] KVM: MMU: fold tlb flush judgement into mmu_spte_update mmu_spte_update() is the common function, we can easily audit the path Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index ed9e96806082..a2fc65ba76a5 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -479,15 +479,24 @@ static void mmu_spte_set(u64 *sptep, u64 new_spte) /* Rules for using mmu_spte_update: * Update the state bits, it means the mapped pfn is not changged. + * + * Whenever we overwrite a writable spte with a read-only one we + * should flush remote TLBs. Otherwise rmap_write_protect + * will find a read-only spte, even though the writable spte + * might be cached on a CPU's TLB, the return value indicates this + * case. 
*/ -static void mmu_spte_update(u64 *sptep, u64 new_spte) +static bool mmu_spte_update(u64 *sptep, u64 new_spte) { u64 mask, old_spte = *sptep; + bool ret = false; WARN_ON(!is_rmap_spte(new_spte)); - if (!is_shadow_present_pte(old_spte)) - return mmu_spte_set(sptep, new_spte); + if (!is_shadow_present_pte(old_spte)) { + mmu_spte_set(sptep, new_spte); + return ret; + } new_spte |= old_spte & shadow_dirty_mask; @@ -500,13 +509,18 @@ static void mmu_spte_update(u64 *sptep, u64 new_spte) else old_spte = __update_clear_spte_slow(sptep, new_spte); + if (is_writable_pte(old_spte) && !is_writable_pte(new_spte)) + ret = true; + if (!shadow_accessed_mask) - return; + return ret; if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask)) kvm_set_pfn_accessed(spte_to_pfn(old_spte)); if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask)) kvm_set_pfn_dirty(spte_to_pfn(old_spte)); + + return ret; } /* @@ -2268,7 +2282,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, pfn_t pfn, bool speculative, bool can_unsync, bool host_writable) { - u64 spte, entry = *sptep; + u64 spte; int ret = 0; if (set_mmio_spte(sptep, gfn, pfn, pte_access)) @@ -2346,14 +2360,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, mark_page_dirty(vcpu->kvm, gfn); set_pte: - mmu_spte_update(sptep, spte); - /* - * If we overwrite a writable spte with a read-only one we - * should flush remote TLBs. Otherwise rmap_write_protect - * will find a read-only spte, even though the writable spte - * might be cached on a CPU's TLB. - */ - if (is_writable_pte(entry) && !is_writable_pte(*sptep)) + if (mmu_spte_update(sptep, spte)) kvm_flush_remote_tlbs(vcpu->kvm); done: return ret; From 49fde3406f3266c5af9430467672c20b63a31e83 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Jun 2012 15:58:58 +0800 Subject: [PATCH 75/93] KVM: MMU: introduce SPTE_MMU_WRITEABLE bit This bit indicates whether the spte can be writable on MMU, that means the corresponding gpte is writable and the corresponding gfn is not protected by shadow page protection In the later path, SPTE_MMU_WRITEABLE will indicates whether the spte can be locklessly updated Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 57 ++++++++++++++++++++++++++++++---------------- 1 file changed, 38 insertions(+), 19 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index a2fc65ba76a5..b160652f7eee 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -145,7 +145,8 @@ module_param(dbg, bool, 0644); #define CREATE_TRACE_POINTS #include "mmutrace.h" -#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT) +#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT) +#define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1)) #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) @@ -1084,34 +1085,51 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) kvm_flush_remote_tlbs(vcpu->kvm); } +static bool spte_is_locklessly_modifiable(u64 spte) +{ + return !(~spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)); +} + /* - * Write-protect on the specified @sptep due to dirty page logging or - * protecting shadow page table. @flush indicates whether tlb need be - * flushed. + * Write-protect on the specified @sptep, @pt_protect indicates whether + * spte writ-protection is caused by protecting shadow page table. + * @flush indicates whether tlb need be flushed. 
+ * + * Note: write protection is difference between drity logging and spte + * protection: + * - for dirty logging, the spte can be set to writable at anytime if + * its dirty bitmap is properly set. + * - for spte protection, the spte can be writable only after unsync-ing + * shadow page. * * Return true if the spte is dropped. */ -static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush) +static bool +spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush, bool pt_protect) { u64 spte = *sptep; - if (!is_writable_pte(spte)) + if (!is_writable_pte(spte) && + !(pt_protect && spte_is_locklessly_modifiable(spte))) return false; rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); - *flush |= true; - - if (__drop_large_spte(kvm, sptep)) + if (__drop_large_spte(kvm, sptep)) { + *flush |= true; return true; + } + if (pt_protect) + spte &= ~SPTE_MMU_WRITEABLE; spte = spte & ~PT_WRITABLE_MASK; - mmu_spte_update(sptep, spte); + + *flush |= mmu_spte_update(sptep, spte); return false; } -static bool -__rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level) +static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, + int level, bool pt_protect) { u64 *sptep; struct rmap_iterator iter; @@ -1119,7 +1137,7 @@ __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level) for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { BUG_ON(!(*sptep & PT_PRESENT_MASK)); - if (spte_write_protect(kvm, sptep, &flush)) { + if (spte_write_protect(kvm, sptep, &flush, pt_protect)) { sptep = rmap_get_first(*rmapp, &iter); continue; } @@ -1148,7 +1166,7 @@ void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, while (mask) { rmapp = &slot->rmap[gfn_offset + __ffs(mask)]; - __rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL); + __rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL, false); /* clear the first set bit */ mask &= mask - 1; @@ -1167,7 +1185,7 @@ static bool rmap_write_protect(struct kvm *kvm, u64 gfn) for (i = PT_PAGE_TABLE_LEVEL; i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { rmapp = __gfn_to_rmap(gfn, i, slot); - write_protected |= __rmap_write_protect(kvm, rmapp, i); + write_protected |= __rmap_write_protect(kvm, rmapp, i, true); } return write_protected; @@ -2296,8 +2314,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, spte |= shadow_x_mask; else spte |= shadow_nx_mask; + if (pte_access & ACC_USER_MASK) spte |= shadow_user_mask; + if (level > PT_PAGE_TABLE_LEVEL) spte |= PT_PAGE_SIZE_MASK; if (tdp_enabled) @@ -2322,7 +2342,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, goto done; } - spte |= PT_WRITABLE_MASK; + spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE; if (!vcpu->arch.mmu.direct_map && !(pte_access & ACC_WRITE_MASK)) { @@ -2351,8 +2371,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, __func__, gfn); ret = 1; pte_access &= ~ACC_WRITE_MASK; - if (is_writable_pte(spte)) - spte &= ~PT_WRITABLE_MASK; + spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE); } } @@ -3933,7 +3952,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) !is_last_spte(pt[i], sp->role.level)) continue; - spte_write_protect(kvm, &pt[i], &flush); + spte_write_protect(kvm, &pt[i], &flush, false); } } kvm_flush_remote_tlbs(kvm); From c7ba5b48cc8ddc015a9e0463813ca1e60bc42c59 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Jun 2012 15:59:18 +0800 Subject: [PATCH 76/93] KVM: MMU: fast path of handling guest page fault If the the present bit of page fault error code is set, it indicates the shadow page is 
populated on all levels, it means what we do is only modify the access bit which can be done out of mmu-lock Currently, in order to simplify the code, we only fix the page fault caused by write-protect on the fast path Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 144 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 127 insertions(+), 17 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index b160652f7eee..8637bffbdb4a 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -446,8 +446,22 @@ static bool __check_direct_spte_mmio_pf(u64 spte) } #endif +static bool spte_is_locklessly_modifiable(u64 spte) +{ + return !(~spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)); +} + static bool spte_has_volatile_bits(u64 spte) { + /* + * Always atomicly update spte if it can be updated + * out of mmu-lock, it can ensure dirty bit is not lost, + * also, it can help us to get a stable is_writable_pte() + * to ensure tlb flush is not missed. + */ + if (spte_is_locklessly_modifiable(spte)) + return true; + if (!shadow_accessed_mask) return false; @@ -489,7 +503,7 @@ static void mmu_spte_set(u64 *sptep, u64 new_spte) */ static bool mmu_spte_update(u64 *sptep, u64 new_spte) { - u64 mask, old_spte = *sptep; + u64 old_spte = *sptep; bool ret = false; WARN_ON(!is_rmap_spte(new_spte)); @@ -499,17 +513,16 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte) return ret; } - new_spte |= old_spte & shadow_dirty_mask; - - mask = shadow_accessed_mask; - if (is_writable_pte(old_spte)) - mask |= shadow_dirty_mask; - - if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask) + if (!spte_has_volatile_bits(old_spte)) __update_clear_spte_fast(sptep, new_spte); else old_spte = __update_clear_spte_slow(sptep, new_spte); + /* + * For the spte updated out of mmu-lock is safe, since + * we always atomicly update it, see the comments in + * spte_has_volatile_bits(). + */ if (is_writable_pte(old_spte) && !is_writable_pte(new_spte)) ret = true; @@ -1085,11 +1098,6 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) kvm_flush_remote_tlbs(vcpu->kvm); } -static bool spte_is_locklessly_modifiable(u64 spte) -{ - return !(~spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)); -} - /* * Write-protect on the specified @sptep, @pt_protect indicates whether * spte writ-protection is caused by protecting shadow page table. @@ -2677,18 +2685,114 @@ exit: return ret; } +static bool page_fault_can_be_fast(struct kvm_vcpu *vcpu, u32 error_code) +{ + /* + * #PF can be fast only if the shadow page table is present and it + * is caused by write-protect, that means we just need change the + * W bit of the spte which can be done out of mmu-lock. + */ + if (!(error_code & PFERR_PRESENT_MASK) || + !(error_code & PFERR_WRITE_MASK)) + return false; + + return true; +} + +static bool +fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 spte) +{ + struct kvm_mmu_page *sp = page_header(__pa(sptep)); + gfn_t gfn; + + WARN_ON(!sp->role.direct); + + /* + * The gfn of direct spte is stable since it is calculated + * by sp->gfn. + */ + gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); + + if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte) + mark_page_dirty(vcpu->kvm, gfn); + + return true; +} + +/* + * Return value: + * - true: let the vcpu to access on the same address again. + * - false: let the real page fault path to fix it. 
+ */ +static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, + u32 error_code) +{ + struct kvm_shadow_walk_iterator iterator; + bool ret = false; + u64 spte = 0ull; + + if (!page_fault_can_be_fast(vcpu, error_code)) + return false; + + walk_shadow_page_lockless_begin(vcpu); + for_each_shadow_entry_lockless(vcpu, gva, iterator, spte) + if (!is_shadow_present_pte(spte) || iterator.level < level) + break; + + /* + * If the mapping has been changed, let the vcpu fault on the + * same address again. + */ + if (!is_rmap_spte(spte)) { + ret = true; + goto exit; + } + + if (!is_last_spte(spte, level)) + goto exit; + + /* + * Check if it is a spurious fault caused by TLB lazily flushed. + * + * Need not check the access of upper level table entries since + * they are always ACC_ALL. + */ + if (is_writable_pte(spte)) { + ret = true; + goto exit; + } + + /* + * Currently, to simplify the code, only the spte write-protected + * by dirty-log can be fast fixed. + */ + if (!spte_is_locklessly_modifiable(spte)) + goto exit; + + /* + * Currently, fast page fault only works for direct mapping since + * the gfn is not stable for indirect shadow page. + * See Documentation/virtual/kvm/locking.txt to get more detail. + */ + ret = fast_pf_fix_direct_spte(vcpu, iterator.sptep, spte); +exit: + walk_shadow_page_lockless_end(vcpu); + + return ret; +} + static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, gva_t gva, pfn_t *pfn, bool write, bool *writable); -static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn, - bool prefault) +static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, + gfn_t gfn, bool prefault) { int r; int level; int force_pt_level; pfn_t pfn; unsigned long mmu_seq; - bool map_writable; + bool map_writable, write = error_code & PFERR_WRITE_MASK; force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); if (likely(!force_pt_level)) { @@ -2705,6 +2809,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn, } else level = PT_PAGE_TABLE_LEVEL; + if (fast_page_fault(vcpu, v, level, error_code)) + return 0; + mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); @@ -3093,7 +3200,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, gfn = gva >> PAGE_SHIFT; return nonpaging_map(vcpu, gva & PAGE_MASK, - error_code & PFERR_WRITE_MASK, gfn, prefault); + error_code, gfn, prefault); } static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) @@ -3173,6 +3280,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, } else level = PT_PAGE_TABLE_LEVEL; + if (fast_page_fault(vcpu, gpa, level, error_code)) + return 0; + mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); From a72faf2504dfc12ff9bfb486a42f2761296666ff Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Jun 2012 15:59:41 +0800 Subject: [PATCH 77/93] KVM: MMU: trace fast page fault To see what happen on this path and help us to optimize it Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 2 ++ arch/x86/kvm/mmutrace.h | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 8637bffbdb4a..28c8fbcc6763 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2776,6 +2776,8 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, */ ret = fast_pf_fix_direct_spte(vcpu, iterator.sptep, spte); exit: + trace_fast_page_fault(vcpu, gva, error_code, 
iterator.sptep, + spte, ret); walk_shadow_page_lockless_end(vcpu); return ret; diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index 89fb0e81322a..c364abc8d034 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h @@ -243,6 +243,44 @@ TRACE_EVENT( TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn, __entry->access) ); + +#define __spte_satisfied(__spte) \ + (__entry->retry && is_writable_pte(__entry->__spte)) + +TRACE_EVENT( + fast_page_fault, + TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code, + u64 *sptep, u64 old_spte, bool retry), + TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry), + + TP_STRUCT__entry( + __field(int, vcpu_id) + __field(gva_t, gva) + __field(u32, error_code) + __field(u64 *, sptep) + __field(u64, old_spte) + __field(u64, new_spte) + __field(bool, retry) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu->vcpu_id; + __entry->gva = gva; + __entry->error_code = error_code; + __entry->sptep = sptep; + __entry->old_spte = old_spte; + __entry->new_spte = *sptep; + __entry->retry = retry; + ), + + TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx" + " new %llx spurious %d fixed %d", __entry->vcpu_id, + __entry->gva, __print_flags(__entry->error_code, "|", + kvm_mmu_trace_pferr_flags), __entry->sptep, + __entry->old_spte, __entry->new_spte, + __spte_satisfied(old_spte), __spte_satisfied(new_spte) + ) +); #endif /* _TRACE_KVMMMU_H */ #undef TRACE_INCLUDE_PATH From 6fbc277053836a4d80c72a0843bcbc7595b31e87 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Jun 2012 16:00:00 +0800 Subject: [PATCH 78/93] KVM: MMU: fix kvm_mmu_pagetable_walk tracepoint The P bit of page fault error code is missed in this tracepoint, fix it by passing the full error code Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- arch/x86/kvm/mmutrace.h | 7 +++---- arch/x86/kvm/paging_tmpl.h | 3 +-- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index c364abc8d034..cd6e98333ba3 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h @@ -54,8 +54,8 @@ */ TRACE_EVENT( kvm_mmu_pagetable_walk, - TP_PROTO(u64 addr, int write_fault, int user_fault, int fetch_fault), - TP_ARGS(addr, write_fault, user_fault, fetch_fault), + TP_PROTO(u64 addr, u32 pferr), + TP_ARGS(addr, pferr), TP_STRUCT__entry( __field(__u64, addr) @@ -64,8 +64,7 @@ TRACE_EVENT( TP_fast_assign( __entry->addr = addr; - __entry->pferr = (!!write_fault << 1) | (!!user_fault << 2) - | (!!fetch_fault << 4); + __entry->pferr = pferr; ), TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr, diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 34f970937ef1..bb7cf01cae76 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -154,8 +154,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, const int fetch_fault = access & PFERR_FETCH_MASK; u16 errcode = 0; - trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault, - fetch_fault); + trace_kvm_mmu_pagetable_walk(addr, access); retry_walk: eperm = false; walker->level = mmu->root_level; From 58d8b1728ea3da391ef01c43a384ea06ce4b7c8a Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Jun 2012 16:00:26 +0800 Subject: [PATCH 79/93] KVM: MMU: document mmu-lock and fast page fault Document fast page fault and mmu-lock in locking.txt Signed-off-by: Xiao Guangrong Signed-off-by: Avi Kivity --- Documentation/virtual/kvm/locking.txt | 130 +++++++++++++++++++++++++- 1 file 
changed, 129 insertions(+), 1 deletion(-) diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt index 3b4cd3bf5631..41b7ac9884b5 100644 --- a/Documentation/virtual/kvm/locking.txt +++ b/Documentation/virtual/kvm/locking.txt @@ -6,7 +6,129 @@ KVM Lock Overview (to be written) -2. Reference +2: Exception +------------ + +Fast page fault: + +Fast page fault is the fast path which fixes the guest page fault out of +the mmu-lock on x86. Currently, the page fault can be fast only if the +shadow page table is present and it is caused by write-protect, that means +we just need change the W bit of the spte. + +What we use to avoid all the race is the SPTE_HOST_WRITEABLE bit and +SPTE_MMU_WRITEABLE bit on the spte: +- SPTE_HOST_WRITEABLE means the gfn is writable on host. +- SPTE_MMU_WRITEABLE means the gfn is writable on mmu. The bit is set when + the gfn is writable on guest mmu and it is not write-protected by shadow + page write-protection. + +On fast page fault path, we will use cmpxchg to atomically set the spte W +bit if spte.SPTE_HOST_WRITEABLE = 1 and spte.SPTE_WRITE_PROTECT = 1, this +is safe because whenever changing these bits can be detected by cmpxchg. + +But we need carefully check these cases: +1): The mapping from gfn to pfn +The mapping from gfn to pfn may be changed since we can only ensure the pfn +is not changed during cmpxchg. This is a ABA problem, for example, below case +will happen: + +At the beginning: +gpte = gfn1 +gfn1 is mapped to pfn1 on host +spte is the shadow page table entry corresponding with gpte and +spte = pfn1 + + VCPU 0 VCPU0 +on fast page fault path: + + old_spte = *spte; + pfn1 is swapped out: + spte = 0; + + pfn1 is re-alloced for gfn2. + + gpte is changed to point to + gfn2 by the guest: + spte = pfn1; + + if (cmpxchg(spte, old_spte, old_spte+W) + mark_page_dirty(vcpu->kvm, gfn1) + OOPS!!! + +We dirty-log for gfn1, that means gfn2 is lost in dirty-bitmap. + +For direct sp, we can easily avoid it since the spte of direct sp is fixed +to gfn. For indirect sp, before we do cmpxchg, we call gfn_to_pfn_atomic() +to pin gfn to pfn, because after gfn_to_pfn_atomic(): +- We have held the refcount of pfn that means the pfn can not be freed and + be reused for another gfn. +- The pfn is writable that means it can not be shared between different gfns + by KSM. + +Then, we can ensure the dirty bitmaps is correctly set for a gfn. + +Currently, to simplify the whole things, we disable fast page fault for +indirect shadow page. + +2): Dirty bit tracking +In the origin code, the spte can be fast updated (non-atomically) if the +spte is read-only and the Accessed bit has already been set since the +Accessed bit and Dirty bit can not be lost. + +But it is not true after fast page fault since the spte can be marked +writable between reading spte and updating spte. Like below case: + +At the beginning: +spte.W = 0 +spte.Accessed = 1 + + VCPU 0 VCPU0 +In mmu_spte_clear_track_bits(): + + old_spte = *spte; + + /* 'if' condition is satisfied. */ + if (old_spte.Accssed == 1 && + old_spte.W == 0) + spte = 0ull; + on fast page fault path: + spte.W = 1 + memory write on the spte: + spte.Dirty = 1 + + + else + old_spte = xchg(spte, 0ull) + + + if (old_spte.Accssed == 1) + kvm_set_pfn_accessed(spte.pfn); + if (old_spte.Dirty == 1) + kvm_set_pfn_dirty(spte.pfn); + OOPS!!! + +The Dirty bit is lost in this case. 
+ +In order to avoid this kind of issue, we always treat the spte as "volatile" +if it can be updated out of mmu-lock, see spte_has_volatile_bits(), it means, +the spte is always atomicly updated in this case. + +3): flush tlbs due to spte updated +If the spte is updated from writable to readonly, we should flush all TLBs, +otherwise rmap_write_protect will find a read-only spte, even though the +writable spte might be cached on a CPU's TLB. + +As mentioned before, the spte can be updated to writable out of mmu-lock on +fast page fault path, in order to easily audit the path, we see if TLBs need +be flushed caused by this reason in mmu_spte_update() since this is a common +function to update spte (present -> present). + +Since the spte is "volatile" if it can be updated out of mmu-lock, we always +atomicly update the spte, the race caused by fast page fault can be avoided, +See the comments in spte_has_volatile_bits() and mmu_spte_update(). + +3. Reference ------------ Name: kvm_lock @@ -23,3 +145,9 @@ Arch: x86 Protects: - kvm_arch::{last_tsc_write,last_tsc_nsec,last_tsc_offset} - tsc offset in vmcb Comment: 'raw' because updating the tsc offsets must not be preempted. + +Name: kvm->mmu_lock +Type: spinlock_t +Arch: any +Protects: -shadow page/shadow tlb entry +Comment: it is a spinlock since it is used in mmu notifier. From 6328e593c3df5e81ad7d18a2752ef2235391e9cc Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Wed, 20 Jun 2012 05:56:53 +0000 Subject: [PATCH 80/93] booke/bookehv: Add host crit-watchdog exception support Signed-off-by: Bharat Bhushan Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/hw_irq.h | 2 ++ arch/powerpc/kvm/booke.c | 21 +++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index c9aac24b02e2..2aadb47efaec 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -34,6 +34,8 @@ extern void __replay_interrupt(unsigned int vector); extern void timer_interrupt(struct pt_regs *); extern void performance_monitor_exception(struct pt_regs *regs); +extern void WatchdogException(struct pt_regs *regs); +extern void unknown_exception(struct pt_regs *regs); #ifdef CONFIG_PPC64 #include diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 86681eec60b1..d25a097c852b 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -612,6 +612,12 @@ static void kvmppc_fill_pt_regs(struct pt_regs *regs) regs->link = lr; } +/* + * For interrupts needed to be handled by host interrupt handlers, + * corresponding host handler are called from here in similar way + * (but not exact) as they are called from low level handler + * (such as from arch/powerpc/kernel/head_fsl_booke.S). 
+ */ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu, unsigned int exit_nr) { @@ -639,6 +645,17 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu, kvmppc_fill_pt_regs(®s); performance_monitor_exception(®s); break; + case BOOKE_INTERRUPT_WATCHDOG: + kvmppc_fill_pt_regs(®s); +#ifdef CONFIG_BOOKE_WDT + WatchdogException(®s); +#else + unknown_exception(®s); +#endif + break; + case BOOKE_INTERRUPT_CRITICAL: + unknown_exception(®s); + break; } } @@ -683,6 +700,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, r = RESUME_GUEST; break; + case BOOKE_INTERRUPT_WATCHDOG: + r = RESUME_GUEST; + break; + case BOOKE_INTERRUPT_DOORBELL: kvmppc_account_exit(vcpu, DBELL_EXITS); r = RESUME_GUEST; From 75c44bbb20807b5148eae19642a0fdb8db20c344 Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Wed, 20 Jun 2012 05:56:54 +0000 Subject: [PATCH 81/93] booke: Added crit/mc exception handler for e500v2 Watchdog is taken at critical exception level. So this patch is tested with host watchdog exception happening when guest is running. Signed-off-by: Bharat Bhushan Signed-off-by: Alexander Graf --- arch/powerpc/kvm/booke_interrupts.S | 55 ++++++++++++++--------------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index 8feec2ff3928..09456c4719f3 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S @@ -53,16 +53,21 @@ (1< Date: Mon, 18 Jun 2012 12:14:55 +0000 Subject: [PATCH 82/93] KVM: PPC: bookehv64: Add support for std/ld emulation. Add support for std/ld emulation. Signed-off-by: Varun Sethi Signed-off-by: Alexander Graf --- arch/powerpc/kvm/emulate.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index f90e86dea7a2..ee04abaefe23 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -59,11 +59,13 @@ #define OP_31_XOP_STHBRX 918 #define OP_LWZ 32 +#define OP_LD 58 #define OP_LWZU 33 #define OP_LBZ 34 #define OP_LBZU 35 #define OP_STW 36 #define OP_STWU 37 +#define OP_STD 62 #define OP_STB 38 #define OP_STBU 39 #define OP_LHZ 40 @@ -392,6 +394,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; + /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */ + case OP_LD: + rt = get_rt(inst); + emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); + break; + case OP_LWZU: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); @@ -412,6 +420,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) 4, 1); break; + /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */ + case OP_STD: + rs = get_rs(inst); + emulated = kvmppc_handle_store(run, vcpu, + kvmppc_get_gpr(vcpu, rs), + 8, 1); + break; + case OP_STWU: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), From 9997782ed5c3d831d23b3252db3af2d83a5aded9 Mon Sep 17 00:00:00 2001 From: Mihai Caraman Date: Fri, 22 Jun 2012 13:33:12 +0000 Subject: [PATCH 83/93] KVM: PPC: bookehv: Add ESR flag to Data Storage Interrupt ESR register is required by Data Storage Interrupt handling code. Add the specific flag to the interrupt handler. 
Signed-off-by: Mihai Caraman Signed-off-by: Alexander Graf --- arch/powerpc/kvm/bookehv_interrupts.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index 6048a00515d7..0fa2ef7df036 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S @@ -267,7 +267,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \ kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \ SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0 kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \ - SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR) + SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR) kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0 kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \ From c7ba7771c3dd01183d3155640129ec7c9707f082 Mon Sep 17 00:00:00 2001 From: Mihai Caraman Date: Mon, 25 Jun 2012 02:26:19 +0000 Subject: [PATCH 84/93] KVM: PPC64: booke: Set interrupt computation mode for 64-bit host A 64-bit host needs to remain in 64-bit mode when an exception takes place. Set the interrupt computation mode in the EPCR register. Signed-off-by: Mihai Caraman Signed-off-by: Alexander Graf --- arch/powerpc/kvm/e500mc.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index fe6c1de6b701..db97ee3a0827 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010 Freescale Semiconductor, Inc. All rights reserved. + * Copyright (C) 2010,2012 Freescale Semiconductor, Inc. All rights reserved. * * Author: Varun Sethi, * @@ -183,6 +183,9 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \ SPRN_EPCR_DUVD; +#ifdef CONFIG_64BIT + vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM; +#endif vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP; vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT); vcpu->arch.epsc = vcpu->arch.eplc; From 66c9897d9d7675bfb8f4cc4d57ceb00b6a12a2e8 Mon Sep 17 00:00:00 2001 From: Mihai Caraman Date: Mon, 25 Jun 2012 02:26:26 +0000 Subject: [PATCH 85/93] KVM: PPC: e500mc: Fix tlbilx emulation for 64-bit guests tlbilxva emulation was using a u32 variable for the guest effective address. Replace it with the gva_t type to handle 64-bit guests. Signed-off-by: Mihai Caraman Signed-off-by: Alexander Graf --- arch/powerpc/kvm/e500mc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index db97ee3a0827..1f89d26e65fb 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -57,7 +57,8 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500, struct kvm_book3e_206_tlb_entry *gtlbe) { unsigned int tid, ts; - u32 val, eaddr, lpid; + gva_t eaddr; + u32 val, lpid; unsigned long flags; ts = get_tlb_ts(gtlbe); From 0c1fc3c3c496f8719fbea7fd3668491413ad420e Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Wed, 27 Jun 2012 19:37:31 +0000 Subject: [PATCH 86/93] KVM: PPC: Critical interrupt emulation support The rfci instruction and the CSRR0/1 registers are emulated.
Signed-off-by: Scott Wood Signed-off-by: Stuart Yoder Signed-off-by: Bharat Bhushan Signed-off-by: Alexander Graf --- arch/powerpc/kvm/booke_emulate.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index 9eb9809eb13e..12834bb608ab 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c @@ -24,6 +24,7 @@ #include "booke.h" #define OP_19_XOP_RFI 50 +#define OP_19_XOP_RFCI 51 #define OP_31_XOP_MFMSR 83 #define OP_31_XOP_WRTEE 131 @@ -36,6 +37,12 @@ static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); } +static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu) +{ + vcpu->arch.pc = vcpu->arch.csrr0; + kvmppc_set_msr(vcpu, vcpu->arch.csrr1); +} + int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { @@ -52,6 +59,12 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, *advance = 0; break; + case OP_19_XOP_RFCI: + kvmppc_emul_rfci(vcpu); + kvmppc_set_exit_type(vcpu, EMULATED_RFCI_EXITS); + *advance = 0; + break; + default: emulated = EMULATE_FAIL; break; @@ -113,6 +126,12 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) case SPRN_ESR: vcpu->arch.shared->esr = spr_val; break; + case SPRN_CSRR0: + vcpu->arch.csrr0 = spr_val; + break; + case SPRN_CSRR1: + vcpu->arch.csrr1 = spr_val; + break; case SPRN_DBCR0: vcpu->arch.dbcr0 = spr_val; break; @@ -232,6 +251,12 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) case SPRN_ESR: *spr_val = vcpu->arch.shared->esr; break; + case SPRN_CSRR0: + *spr_val = vcpu->arch.csrr0; + break; + case SPRN_CSRR1: + *spr_val = vcpu->arch.csrr1; + break; case SPRN_DBCR0: *spr_val = vcpu->arch.dbcr0; break; From fc73373b33f5f965f2f82bfbc40ef8e6072e986d Mon Sep 17 00:00:00 2001 From: Prarit Bhargava Date: Fri, 6 Jul 2012 13:47:39 -0400 Subject: [PATCH 87/93] KVM: Add x86_hyper_kvm to complete detect_hypervisor_platform check While debugging I noticed that unlike all the other hypervisor code in the kernel, kvm does not have an entry for x86_hyper which is used in detect_hypervisor_platform() which results in a nice printk in the syslog. This is only really a stub function but it does make kvm more consistent with the other hypervisors. 
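For readers unfamiliar with the detection table this hooks into: each hypervisor supplies a name and a detect() callback, and the platform setup code walks the table and announces the first match. A stand-alone sketch of that shape, with hypothetical detect functions:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct hyper {
        const char *name;
        bool (*detect)(void);
};

static bool detect_kvm(void)   { return false; }   /* would probe CPUID/paravirt leaves */
static bool detect_other(void) { return false; }

static const struct hyper hypervisors[] = {
        { "KVM",   detect_kvm },
        { "Other", detect_other },
};

/* Report the first entry whose detect() fires, analogous to the printk that
 * detect_hypervisor_platform() emits. */
int main(void)
{
        for (size_t i = 0; i < sizeof(hypervisors) / sizeof(hypervisors[0]); i++)
                if (hypervisors[i].detect())
                        printf("Hypervisor detected: %s\n", hypervisors[i].name);
        return 0;
}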
Signed-off-by: Prarit Bhargava Cc: Avi Kivity Cc: Gleb Natapov Cc: Alex Williamson Cc: Konrad Rzeszutek Wilk Cc: Marcelo Tostatti Cc: kvm@vger.kernel.org Signed-off-by: Avi Kivity --- arch/x86/include/asm/hypervisor.h | 1 + arch/x86/kernel/cpu/hypervisor.c | 1 + arch/x86/kernel/kvm.c | 14 ++++++++++++++ 3 files changed, 16 insertions(+) diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h index 7a15153c675d..b518c7509933 100644 --- a/arch/x86/include/asm/hypervisor.h +++ b/arch/x86/include/asm/hypervisor.h @@ -49,6 +49,7 @@ extern const struct hypervisor_x86 *x86_hyper; extern const struct hypervisor_x86 x86_hyper_vmware; extern const struct hypervisor_x86 x86_hyper_ms_hyperv; extern const struct hypervisor_x86 x86_hyper_xen_hvm; +extern const struct hypervisor_x86 x86_hyper_kvm; static inline bool hypervisor_x2apic_available(void) { diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index 755f64fb0743..6d6dd7afb229 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c @@ -37,6 +37,7 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] = #endif &x86_hyper_vmware, &x86_hyper_ms_hyperv, + &x86_hyper_kvm, }; const struct hypervisor_x86 *x86_hyper; diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 75ab94c75c7a..299cf1470923 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -41,6 +41,7 @@ #include #include #include +#include static int kvmapf = 1; @@ -483,6 +484,19 @@ void __init kvm_guest_init(void) #endif } +static bool __init kvm_detect(void) +{ + if (!kvm_para_available()) + return false; + return true; +} + +const struct hypervisor_x86 x86_hyper_kvm __refconst = { + .name = "KVM", + .detect = kvm_detect, +}; +EXPORT_SYMBOL_GPL(x86_hyper_kvm); + static __init int activate_jump_labels(void) { if (has_steal_clock) { From ad756a1603c5fac207758faaac7f01c34c9d0b7b Mon Sep 17 00:00:00 2001 From: "Mao, Junjie" Date: Mon, 2 Jul 2012 01:18:48 +0000 Subject: [PATCH 88/93] KVM: VMX: Implement PCID/INVPCID for guests with EPT This patch handles PCID/INVPCID for guests. Process-context identifiers (PCIDs) are a facility by which a logical processor may cache information for multiple linear-address spaces so that the processor may retain cached information when software switches to a different linear address space. Refer to section 4.10.1 in IA32 Intel Software Developer's Manual Volume 3A for details. For guests with EPT, the PCID feature is enabled and INVPCID behaves as running natively. For guests without EPT, the PCID feature is disabled and INVPCID triggers #UD. 
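A quick way for a guest to cross-check what this exposes is to read the two CPUID feature bits involved: PCID in CPUID.01H:ECX bit 17 and INVPCID in CPUID.(EAX=07H,ECX=0):EBX bit 10 (bit positions quoted from memory of the SDM, so verify them there). A small user-space sketch, assuming a toolchain whose cpuid.h provides __get_cpuid_count:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* CPUID leaf 1: PCID support is reported in ECX bit 17. */
        if (__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                printf("PCID:    %s\n", (ecx & (1u << 17)) ? "yes" : "no");

        /* CPUID leaf 7, subleaf 0: INVPCID support is reported in EBX bit 10. */
        if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
                printf("INVPCID: %s\n", (ebx & (1u << 10)) ? "yes" : "no");

        return 0;
}

In a guest running without EPT on this code, the second line should print "no", consistent with INVPCID raising #UD as described above.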
Signed-off-by: Junjie Mao Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 4 ++- arch/x86/include/asm/processor-flags.h | 2 ++ arch/x86/include/asm/vmx.h | 2 ++ arch/x86/kvm/cpuid.c | 5 ++-- arch/x86/kvm/cpuid.h | 8 ++++++ arch/x86/kvm/svm.c | 6 +++++ arch/x86/kvm/vmx.c | 34 +++++++++++++++++++++++++- arch/x86/kvm/x86.c | 23 ++++++++++++++--- 8 files changed, 77 insertions(+), 7 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 24b76474d9de..a3e9409e90b6 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -48,12 +48,13 @@ #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1) #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) +#define CR3_PCID_ENABLED_RESERVED_BITS 0xFFFFFF0000000000ULL #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ 0xFFFFFF0000000000ULL) #define CR4_RESERVED_BITS \ (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\ | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \ - | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \ + | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \ | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_RDWRGSFS \ | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE)) @@ -673,6 +674,7 @@ struct kvm_x86_ops { u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); int (*get_lpage_level)(void); bool (*rdtscp_supported)(void); + bool (*invpcid_supported)(void); void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host); void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h index f8ab3eaad128..aea1d1d848c7 100644 --- a/arch/x86/include/asm/processor-flags.h +++ b/arch/x86/include/asm/processor-flags.h @@ -44,6 +44,7 @@ */ #define X86_CR3_PWT 0x00000008 /* Page Write Through */ #define X86_CR3_PCD 0x00000010 /* Page Cache Disable */ +#define X86_CR3_PCID_MASK 0x00000fff /* PCID Mask */ /* * Intel CPU features in CR4 @@ -61,6 +62,7 @@ #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */ #define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */ #define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */ +#define X86_CR4_PCIDE 0x00020000 /* enable PCID support */ #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */ #define X86_CR4_SMEP 0x00100000 /* enable SMEP support */ diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index de007c272730..74fcb963595b 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -60,6 +60,7 @@ #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080 #define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400 +#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000 #define PIN_BASED_EXT_INTR_MASK 0x00000001 @@ -281,6 +282,7 @@ enum vmcs_field { #define EXIT_REASON_EPT_MISCONFIG 49 #define EXIT_REASON_WBINVD 54 #define EXIT_REASON_XSETBV 55 +#define EXIT_REASON_INVPCID 58 /* * Interruption-information format diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 197afd53e3a4..0595f1397b7c 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -201,6 +201,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, unsigned f_lm = 0; #endif unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0; + unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? 
F(INVPCID) : 0; /* cpuid 1.edx */ const u32 kvm_supported_word0_x86_features = @@ -228,7 +229,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, 0 /* DS-CPL, VMX, SMX, EST */ | 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ | F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ | - 0 /* Reserved, DCA */ | F(XMM4_1) | + F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) | F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) | 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) | F(F16C) | F(RDRAND); @@ -248,7 +249,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, /* cpuid 7.0.ebx */ const u32 kvm_supported_word9_x86_features = F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) | - F(BMI2) | F(ERMS) | F(RTM); + F(BMI2) | F(ERMS) | f_invpcid | F(RTM); /* all calls to cpuid_count() should be made on the same cpu */ get_cpu(); diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index f449edc35e24..a10e46016851 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -52,4 +52,12 @@ static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu) return best && (best->ecx & bit(X86_FEATURE_OSVW)); } +static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu) +{ + struct kvm_cpuid_entry2 *best; + + best = kvm_find_cpuid_entry(vcpu, 1, 0); + return best && (best->ecx & bit(X86_FEATURE_PCID)); +} + #endif diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 7a418783259d..baead950d6c8 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -4044,6 +4044,11 @@ static bool svm_rdtscp_supported(void) return false; } +static bool svm_invpcid_supported(void) +{ + return false; +} + static bool svm_has_wbinvd_exit(void) { return true; @@ -4312,6 +4317,7 @@ static struct kvm_x86_ops svm_x86_ops = { .cpuid_update = svm_cpuid_update, .rdtscp_supported = svm_rdtscp_supported, + .invpcid_supported = svm_invpcid_supported, .set_supported_cpuid = svm_set_supported_cpuid, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 5c52a6d29908..c39b60707e02 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -861,6 +861,12 @@ static inline bool cpu_has_vmx_rdtscp(void) SECONDARY_EXEC_RDTSCP; } +static inline bool cpu_has_vmx_invpcid(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_ENABLE_INVPCID; +} + static inline bool cpu_has_virtual_nmis(void) { return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS; @@ -1751,6 +1757,11 @@ static bool vmx_rdtscp_supported(void) return cpu_has_vmx_rdtscp(); } +static bool vmx_invpcid_supported(void) +{ + return cpu_has_vmx_invpcid() && enable_ept; +} + /* * Swap MSR entry in host/guest MSR entry array. */ @@ -2470,7 +2481,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) SECONDARY_EXEC_ENABLE_EPT | SECONDARY_EXEC_UNRESTRICTED_GUEST | SECONDARY_EXEC_PAUSE_LOOP_EXITING | - SECONDARY_EXEC_RDTSCP; + SECONDARY_EXEC_RDTSCP | + SECONDARY_EXEC_ENABLE_INVPCID; if (adjust_vmx_controls(min2, opt2, MSR_IA32_VMX_PROCBASED_CTLS2, &_cpu_based_2nd_exec_control) < 0) @@ -3800,6 +3812,8 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) if (!enable_ept) { exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; enable_unrestricted_guest = 0; + /* Enable INVPCID for non-ept guests may cause performance regression. 
*/ + exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; } if (!enable_unrestricted_guest) exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; @@ -6550,6 +6564,23 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) } } } + + exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); + /* Exposing INVPCID only when PCID is exposed */ + best = kvm_find_cpuid_entry(vcpu, 0x7, 0); + if (vmx_invpcid_supported() && + best && (best->ecx & bit(X86_FEATURE_INVPCID)) && + guest_cpuid_has_pcid(vcpu)) { + exec_control |= SECONDARY_EXEC_ENABLE_INVPCID; + vmcs_write32(SECONDARY_VM_EXEC_CONTROL, + exec_control); + } else { + exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; + vmcs_write32(SECONDARY_VM_EXEC_CONTROL, + exec_control); + if (best) + best->ecx &= ~bit(X86_FEATURE_INVPCID); + } } static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) @@ -7284,6 +7315,7 @@ static struct kvm_x86_ops vmx_x86_ops = { .cpuid_update = vmx_cpuid_update, .rdtscp_supported = vmx_rdtscp_supported, + .invpcid_supported = vmx_invpcid_supported, .set_supported_cpuid = vmx_set_supported_cpuid, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ff0b487e7254..59b59508ff07 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -528,6 +528,9 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) return 1; } + if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) + return 1; + kvm_x86_ops->set_cr0(vcpu, cr0); if ((cr0 ^ old_cr0) & X86_CR0_PG) { @@ -604,10 +607,20 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) kvm_read_cr3(vcpu))) return 1; + if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) { + if (!guest_cpuid_has_pcid(vcpu)) + return 1; + + /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */ + if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) + return 1; + } + if (kvm_x86_ops->set_cr4(vcpu, cr4)) return 1; - if ((cr4 ^ old_cr4) & pdptr_bits) + if (((cr4 ^ old_cr4) & pdptr_bits) || + (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) kvm_mmu_reset_context(vcpu); if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) @@ -626,8 +639,12 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) } if (is_long_mode(vcpu)) { - if (cr3 & CR3_L_MODE_RESERVED_BITS) - return 1; + if (kvm_read_cr4(vcpu) & X86_CR4_PCIDE) { + if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS) + return 1; + } else + if (cr3 & CR3_L_MODE_RESERVED_BITS) + return 1; } else { if (is_pae(vcpu)) { if (cr3 & CR3_PAE_RESERVED_BITS) From 1551df646dd42122e17401013dba7a509d0f1b0d Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 15 Jul 2012 15:56:46 +0300 Subject: [PATCH 89/93] apic: add apic_set_eoi_write for PV use KVM PV EOI optimization overrides eoi_write apic op with its own version. Add an API for this to avoid meddling with core x86 apic driver data structures directly. For KVM use, we don't need any guarantees about when the switch to the new op will take place, so it could in theory use this API after SMP init, but it currently doesn't, and restricting callers to early init makes it clear that it's safe as it won't race with actual APIC driver use. Signed-off-by: Michael S. 
Tsirkin Acked-by: Ingo Molnar Signed-off-by: Avi Kivity --- arch/x86/include/asm/apic.h | 3 +++ arch/x86/kernel/apic/apic.c | 17 +++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index aa5b2eec3602..ff8dff645e80 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -469,6 +469,8 @@ static inline u32 safe_apic_wait_icr_idle(void) return apic->safe_wait_icr_idle(); } +extern void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)); + #else /* CONFIG_X86_LOCAL_APIC */ static inline u32 apic_read(u32 reg) { return 0; } @@ -478,6 +480,7 @@ static inline u64 apic_icr_read(void) { return 0; } static inline void apic_icr_write(u32 low, u32 high) { } static inline void apic_wait_icr_idle(void) { } static inline u32 safe_apic_wait_icr_idle(void) { return 0; } +static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {} #endif /* CONFIG_X86_LOCAL_APIC */ diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 39a222e094af..c7520b6184ef 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -2123,6 +2123,23 @@ void default_init_apic_ldr(void) apic_write(APIC_LDR, val); } +/* + * Override the generic EOI implementation with an optimized version. + * Only called during early boot when only one CPU is active and with + * interrupts disabled, so we know this does not race with actual APIC driver + * use. + */ +void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) +{ + struct apic **drv; + + for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) { + /* Should happen once for each apic */ + WARN_ON((*drv)->eoi_write == eoi_write); + (*drv)->eoi_write = eoi_write; + } +} + /* * Power management */ From 90536664063641bf87d8ac9e109f3f109f804d3e Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 15 Jul 2012 15:56:52 +0300 Subject: [PATCH 90/93] KVM guest: switch to apic_set_eoi_write, apic_write Use apic_set_eoi_write, apic_write to avoid meddling in core apic driver data structures directly. Signed-off-by: Michael S. Tsirkin Signed-off-by: Avi Kivity --- arch/x86/kernel/kvm.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 299cf1470923..c1d61ee4b4f1 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -299,7 +299,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val) */ if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi))) return; - apic->write(APIC_EOI, APIC_EOI_ACK); + apic_write(APIC_EOI, APIC_EOI_ACK); } void __cpuinit kvm_guest_cpu_init(void) @@ -466,15 +466,8 @@ void __init kvm_guest_init(void) pv_time_ops.steal_clock = kvm_steal_clock; } - if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) { - struct apic **drv; - - for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) { - /* Should happen once for each apic */ - WARN_ON((*drv)->eoi_write == kvm_guest_apic_eoi_write); - (*drv)->eoi_write = kvm_guest_apic_eoi_write; - } - } + if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) + apic_set_eoi_write(kvm_guest_apic_eoi_write); #ifdef CONFIG_SMP smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu; From ebf7d2e9939f6571ef0b7381e7f95eb10f0c686b Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 15 Jul 2012 15:56:58 +0300 Subject: [PATCH 91/93] Revert "apic: fix kvm build on UP without IOAPIC" This reverts commit f9808b7fd422b965cea52e05ba470e0a473c53d3.
After commit 'kvm: switch to apic_set_eoi_write, apic_write' the stubs are no longer needed as kvm does not look at apicdrivers anymore. Signed-off-by: Michael S. Tsirkin Signed-off-by: Avi Kivity --- arch/x86/include/asm/apic.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index ff8dff645e80..839b8f58a272 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -417,12 +417,7 @@ extern struct apic *apic; __aligned(sizeof(struct apic *)) \ __section(.apicdrivers) = { &sym1, &sym2 } -#ifdef CONFIG_X86_LOCAL_APIC extern struct apic *__apicdrivers[], *__apicdrivers_end[]; -#else -#define __apicdrivers ((struct apic **)NULL) -#define __apicdrivers_end ((struct apic **)NULL) -#endif /* * APIC functionality to boot other CPUs - only used on SMP: From d63d3e6217c49b81d74141b7920bbe5950532432 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 18 Jul 2012 11:48:50 +0300 Subject: [PATCH 92/93] x86, hyper: fix build with !CONFIG_KVM_GUEST Signed-off-by: Avi Kivity Signed-off-by: Marcelo Tosatti --- arch/x86/kernel/cpu/hypervisor.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index 6d6dd7afb229..a8f8fa9769d6 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c @@ -37,7 +37,9 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] = #endif &x86_hyper_vmware, &x86_hyper_ms_hyperv, +#ifdef CONFIG_KVM_GUEST &x86_hyper_kvm, +#endif }; const struct hypervisor_x86 *x86_hyper; From 1a577b72475d161b6677c05abe57301362023bb2 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 19 Jul 2012 13:45:20 +0300 Subject: [PATCH 93/93] KVM: fix race with level interrupts When more than one source id is in use for the same GSI, we have the following race related to handling irq_states: CPU 0 clears bit 0. CPU 0 reads irq_state as 0. CPU 1 sets level to 1. CPU 1 calls kvm_ioapic_set_irq(1). CPU 0 calls kvm_ioapic_set_irq(0). Now the ioapic thinks the level is 0 but irq_state is not 0. Fix by performing all irq_states bitmap handling under the pic/ioapic lock. This also removes the need for atomics in irq_states handling. Reported-by: Gleb Natapov Signed-off-by: Michael S.
Tsirkin Signed-off-by: Marcelo Tosatti --- arch/x86/include/asm/kvm_host.h | 15 ++++++++++++++- arch/x86/kvm/i8259.c | 17 ++++++++++++++--- virt/kvm/ioapic.c | 19 ++++++++++++++++--- virt/kvm/ioapic.h | 4 +++- virt/kvm/irq_comm.c | 31 ++++--------------------------- 5 files changed, 51 insertions(+), 35 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a3e9409e90b6..2c75b400e40c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -816,7 +816,20 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault); bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl); -int kvm_pic_set_irq(void *opaque, int irq, int level); +static inline int __kvm_irq_line_state(unsigned long *irq_state, + int irq_source_id, int level) +{ + /* Logical OR for level trig interrupt */ + if (level) + __set_bit(irq_source_id, irq_state); + else + __clear_bit(irq_source_id, irq_state); + + return !!(*irq_state); +} + +int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level); +void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id); void kvm_inject_nmi(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 81cf4fa4a2be..1df8fb9e1d5d 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -188,14 +188,15 @@ void kvm_pic_update_irq(struct kvm_pic *s) pic_unlock(s); } -int kvm_pic_set_irq(void *opaque, int irq, int level) +int kvm_pic_set_irq(struct kvm_pic *s, int irq, int irq_source_id, int level) { - struct kvm_pic *s = opaque; int ret = -1; pic_lock(s); if (irq >= 0 && irq < PIC_NUM_PINS) { - ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level); + int irq_level = __kvm_irq_line_state(&s->irq_states[irq], + irq_source_id, level); + ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, irq_level); pic_update_irq(s); trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr, s->pics[irq >> 3].imr, ret == 0); @@ -205,6 +206,16 @@ int kvm_pic_set_irq(void *opaque, int irq, int level) return ret; } +void kvm_pic_clear_all(struct kvm_pic *s, int irq_source_id) +{ + int i; + + pic_lock(s); + for (i = 0; i < PIC_NUM_PINS; i++) + __clear_bit(irq_source_id, &s->irq_states[i]); + pic_unlock(s); +} + /* * acknowledge interrupt 'irq' */ diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index 26fd54dc459e..ef61d529a6c4 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c @@ -191,7 +191,8 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq) return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe); } -int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level) +int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id, + int level) { u32 old_irr; u32 mask = 1 << irq; @@ -201,9 +202,11 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level) spin_lock(&ioapic->lock); old_irr = ioapic->irr; if (irq >= 0 && irq < IOAPIC_NUM_PINS) { + int irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq], + irq_source_id, level); entry = ioapic->redirtbl[irq]; - level ^= entry.fields.polarity; - if (!level) + irq_level ^= entry.fields.polarity; + if (!irq_level) ioapic->irr &= ~mask; else { int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG); @@ -221,6 +224,16 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level) return ret; } +void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id) +{ + int i; + + 
spin_lock(&ioapic->lock); + for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++) + __clear_bit(irq_source_id, &ioapic->irq_states[i]); + spin_unlock(&ioapic->lock); +} + static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector, int trigger_mode) { diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h index 32872a09b63f..a30abfe6ed16 100644 --- a/virt/kvm/ioapic.h +++ b/virt/kvm/ioapic.h @@ -74,7 +74,9 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode); bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector); int kvm_ioapic_init(struct kvm *kvm); void kvm_ioapic_destroy(struct kvm *kvm); -int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level); +int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id, + int level); +void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id); void kvm_ioapic_reset(struct kvm_ioapic *ioapic); int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, struct kvm_lapic_irq *irq); diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c index a6a0365475ed..cc59c68da032 100644 --- a/virt/kvm/irq_comm.c +++ b/virt/kvm/irq_comm.c @@ -33,26 +33,12 @@ #include "ioapic.h" -static inline int kvm_irq_line_state(unsigned long *irq_state, - int irq_source_id, int level) -{ - /* Logical OR for level trig interrupt */ - if (level) - set_bit(irq_source_id, irq_state); - else - clear_bit(irq_source_id, irq_state); - - return !!(*irq_state); -} - static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level) { #ifdef CONFIG_X86 struct kvm_pic *pic = pic_irqchip(kvm); - level = kvm_irq_line_state(&pic->irq_states[e->irqchip.pin], - irq_source_id, level); - return kvm_pic_set_irq(pic, e->irqchip.pin, level); + return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level); #else return -1; #endif @@ -62,10 +48,7 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level) { struct kvm_ioapic *ioapic = kvm->arch.vioapic; - level = kvm_irq_line_state(&ioapic->irq_states[e->irqchip.pin], - irq_source_id, level); - - return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, level); + return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level); } inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq) @@ -249,8 +232,6 @@ unlock: void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id) { - int i; - ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID); mutex_lock(&kvm->irq_lock); @@ -263,14 +244,10 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id) if (!irqchip_in_kernel(kvm)) goto unlock; - for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++) { - clear_bit(irq_source_id, &kvm->arch.vioapic->irq_states[i]); - if (i >= 16) - continue; + kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id); #ifdef CONFIG_X86 - clear_bit(irq_source_id, &pic_irqchip(kvm)->irq_states[i]); + kvm_pic_clear_all(pic_irqchip(kvm), irq_source_id); #endif - } unlock: mutex_unlock(&kvm->irq_lock); }
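
The shared-bitmap rule the last patch enforces can be seen in isolation with the small standalone C model below: each irq_source_id owns one bit in irq_states, the effective line level is the logical OR of all bits, and both the bitmap update and the derived chip level are computed under a single lock, so no source can publish a stale level. This is only an illustrative sketch; the type and function names (irq_line_model, irq_line_set) are made up for the example and do not exist in the kernel.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct irq_line_model {
	pthread_mutex_t lock;
	unsigned long irq_states;	/* one bit per irq_source_id */
	int chip_level;			/* last level seen by the emulated PIC/IOAPIC */
};

static void irq_line_set(struct irq_line_model *l, int source_id, bool level)
{
	pthread_mutex_lock(&l->lock);
	if (level)
		l->irq_states |= 1UL << source_id;
	else
		l->irq_states &= ~(1UL << source_id);
	/* Derive the chip level under the same lock: logical OR of all sources. */
	l->chip_level = (l->irq_states != 0);
	pthread_mutex_unlock(&l->lock);
}

int main(void)
{
	struct irq_line_model l = { .lock = PTHREAD_MUTEX_INITIALIZER };

	irq_line_set(&l, 1, true);	/* source 1 raises the line */
	irq_line_set(&l, 0, false);	/* source 0 drops its bit; line must stay high */
	printf("chip_level=%d irq_states=%#lx\n", l.chip_level, l.irq_states);
	return 0;
}

Without one lock around both steps, the interleaving from the commit message (CPU 0 clears its bit and computes 0 while CPU 1 sets its bit) can leave the emulated chip believing the line is low while irq_states is non-zero; kvm_pic_set_irq() and kvm_ioapic_set_irq() avoid exactly that by evaluating __kvm_irq_line_state() under pic_lock() and the ioapic spinlock, respectively.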