Merge branch 'kvm-updates/2.6.31' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.31' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: shut up uninit compiler warning in paging_tmpl.h
  KVM: Ignore reads to K7 EVNTSEL MSRs
  KVM: VMX: Handle vmx instruction vmexits
  KVM: s390: Allow stfle instruction in the guest
  KVM: kvm/x86_emulate.c toggle_interruptibility() should be static
  KVM: ia64: fix ia64 build due to missing kallsyms_lookup() and double export
  KVM: protect concurrent make_all_cpus_request
  KVM: MMU: Allow 4K ptes with bit 7 (PAT) set
  KVM: Fix dirty bit tracking for slots with large pages
commit 9a8fb9ee7a
@@ -11,5 +11,11 @@
  *
  */
 #undef CONFIG_MODULES
+#include <linux/module.h>
+#undef CONFIG_KALLSYMS
+#undef EXPORT_SYMBOL
+#undef EXPORT_SYMBOL_GPL
+#define EXPORT_SYMBOL(sym)
+#define EXPORT_SYMBOL_GPL(sym)
 #include "../../../lib/vsprintf.c"
 #include "../../../lib/ctype.c"
@@ -99,7 +99,9 @@ struct kvm_s390_sie_block {
 	__u8	reservedd0[48];		/* 0x00d0 */
 	__u64	gcr[16];		/* 0x0100 */
 	__u64	gbea;			/* 0x0180 */
-	__u8	reserved188[120];	/* 0x0188 */
+	__u8	reserved188[24];	/* 0x0188 */
+	__u32	fac;			/* 0x01a0 */
+	__u8	reserved1a4[92];	/* 0x01a4 */
 } __attribute__((packed));
 
 struct kvm_vcpu_stat {
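The offset comments stay consistent: reserved188 shrinks from 120 to 24 bytes so that fac lands at 0x01a0, and reserved1a4[92] pads the block back out to 0x0200, exactly where reserved188[120] used to end. A hypothetical compile-time check of that layout (not part of the patch; it assumes the block remains 512 bytes, as the offset comments indicate) could read:

#include <linux/kernel.h>	/* for BUILD_BUG_ON() */
#include <linux/stddef.h>	/* for offsetof() */

/* Hypothetical layout check mirroring the offset comments above. */
static inline void sie_block_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct kvm_s390_sie_block, fac) != 0x1a0);
	BUILD_BUG_ON(sizeof(struct kvm_s390_sie_block) != 0x200);
}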
@@ -25,6 +25,7 @@
 #include <asm/lowcore.h>
 #include <asm/pgtable.h>
 #include <asm/nmi.h>
+#include <asm/system.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 
@@ -69,6 +70,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ NULL }
 };
 
+static unsigned long long *facilities;
 
 /* Section: not file related */
 void kvm_arch_hardware_enable(void *garbage)
@@ -288,6 +290,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
 	vcpu->arch.sie_block->ecb   = 2;
 	vcpu->arch.sie_block->eca   = 0xC1002001U;
+	vcpu->arch.sie_block->fac   = (int) (long) facilities;
 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
 	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
 		     (unsigned long) vcpu);
@@ -739,11 +742,29 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 
 static int __init kvm_s390_init(void)
 {
-	return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+	int ret;
+	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+	if (ret)
+		return ret;
+
+	/*
+	 * guests can ask for up to 255+1 double words, we need a full page
+	 * to hold the maximum amount of facilites. On the other hand, we
+	 * only set facilities that are known to work in KVM.
+	 */
+	facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
+	if (!facilities) {
+		kvm_exit();
+		return -ENOMEM;
+	}
+	stfle(facilities, 1);
+	facilities[0] &= 0xff00fff3f0700000ULL;
+	return 0;
 }
 
 static void __exit kvm_s390_exit(void)
 {
+	free_page((unsigned long) facilities);
 	kvm_exit();
 }
 
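kvm_s390_init() now snapshots the host facility list with stfle() into a zeroed DMA page and masks it down to the bits KVM knows how to virtualize (0xff00fff3f0700000ULL); the result is wired into each vcpu through sie_block->fac above. For orientation, s390 numbers facility bits from the most significant bit of the first doubleword; a minimal, hypothetical helper for testing a bit in such a list (not part of this diff) might look like:

#include <linux/types.h>

/* Hypothetical helper, assuming the usual s390 numbering where facility
 * bit N lives in doubleword N/64, counted from the most significant bit. */
static inline int test_facility_bit(const unsigned long long *fac, int nr)
{
	return (fac[nr / 64] >> (63 - (nr % 64))) & 1;
}

For example, test_facility_bit(facilities, 7) would report whether facility 7 (stfle itself) survived the mask.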
@@ -158,7 +158,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
 
 	vcpu->stat.instruction_stfl++;
 	/* only pass the facility bits, which we can handle */
-	facility_list &= 0xfe00fff3;
+	facility_list &= 0xff00fff3;
 
 	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
 			   &facility_list, sizeof(facility_list));
@@ -2157,7 +2157,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
 		else
 			/* 32 bits PSE 4MB page */
 			context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
-		context->rsvd_bits_mask[1][0] = ~0ull;
+		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
 		break;
 	case PT32E_ROOT_LEVEL:
 		context->rsvd_bits_mask[0][2] =
@@ -2170,7 +2170,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
 		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
 			rsvd_bits(maxphyaddr, 62) |
 			rsvd_bits(13, 20);		/* large page */
-		context->rsvd_bits_mask[1][0] = ~0ull;
+		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
 		break;
 	case PT64_ROOT_LEVEL:
 		context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
@@ -2186,7 +2186,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
 		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
 			rsvd_bits(maxphyaddr, 51) |
 			rsvd_bits(13, 20);		/* large page */
-		context->rsvd_bits_mask[1][0] = ~0ull;
+		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
 		break;
 	}
 }
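Since bit 7 of a 4K pte is PAT rather than a page-size bit, the level-0 row of the "bit 7 set" masks now mirrors the ordinary 4K mask instead of treating every such pte as reserved (the ~0ull entries removed above). For reference, rsvd_bits(s, e), as used throughout these hunks, builds a mask with bits s through e set; a sketch of the helper as it is conventionally defined in mmu.c (shown for context, not part of this diff):

static inline u64 rsvd_bits(int s, int e)
{
	/* set bits s..e inclusive, e.g. rsvd_bits(13, 21) == 0x3fe000 */
	return ((1ULL << (e - s + 1)) - 1) << s;
}

So rsvd_bits(13, 21) flags the bits that must be zero in a 32-bit PSE 4MB mapping, and rsvd_bits(maxphyaddr, 51) flags the physical-address bits beyond what the CPU implements.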
@@ -281,7 +281,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 {
 	unsigned access = gw->pt_access;
 	struct kvm_mmu_page *shadow_page;
-	u64 spte, *sptep;
+	u64 spte, *sptep = NULL;
 	int direct;
 	gfn_t table_gfn;
 	int r;
@@ -3012,6 +3012,12 @@ static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 1;
 }
 
+static int handle_vmx_insn(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+	kvm_queue_exception(vcpu, UD_VECTOR);
+	return 1;
+}
+
 static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -3198,6 +3204,15 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
 	[EXIT_REASON_HLT]                     = handle_halt,
 	[EXIT_REASON_INVLPG]                  = handle_invlpg,
 	[EXIT_REASON_VMCALL]                  = handle_vmcall,
+	[EXIT_REASON_VMCLEAR]                 = handle_vmx_insn,
+	[EXIT_REASON_VMLAUNCH]                = handle_vmx_insn,
+	[EXIT_REASON_VMPTRLD]                 = handle_vmx_insn,
+	[EXIT_REASON_VMPTRST]                 = handle_vmx_insn,
+	[EXIT_REASON_VMREAD]                  = handle_vmx_insn,
+	[EXIT_REASON_VMRESUME]                = handle_vmx_insn,
+	[EXIT_REASON_VMWRITE]                 = handle_vmx_insn,
+	[EXIT_REASON_VMOFF]                   = handle_vmx_insn,
+	[EXIT_REASON_VMON]                    = handle_vmx_insn,
 	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
 	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
 	[EXIT_REASON_WBINVD]                  = handle_wbinvd,
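The table above is indexed by the hardware exit reason, so every VMX-instruction exit now lands in handle_vmx_insn(), which simply queues #UD back into the guest (nested VMX is not supported at this point). A simplified sketch of the dispatch, paraphrasing the shape of vmx_handle_exit rather than quoting it:

/* Simplified dispatch sketch; names follow vmx.c but the body is paraphrased. */
static int demo_dispatch_exit(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run,
			      u32 exit_reason)
{
	if (exit_reason < ARRAY_SIZE(kvm_vmx_exit_handlers) &&
	    kvm_vmx_exit_handlers[exit_reason])
		/* e.g. EXIT_REASON_VMLAUNCH -> handle_vmx_insn -> #UD in guest */
		return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);

	/* unrecognized exit reasons are reported to userspace */
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	kvm_run->hw.hardware_exit_reason = exit_reason;
	return 0;
}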
@@ -898,6 +898,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_VM_HSAVE_PA:
 	case MSR_P6_EVNTSEL0:
 	case MSR_P6_EVNTSEL1:
+	case MSR_K7_EVNTSEL0:
 		data = 0;
 		break;
 	case MSR_MTRRcap:
@@ -1361,7 +1361,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
 	return 0;
 }
 
-void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
+static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
 {
 	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(ctxt->vcpu, mask);
 	/*
@@ -125,6 +125,7 @@ struct kvm_kernel_irq_routing_entry {
 struct kvm {
 	struct mutex lock; /* protects the vcpus array and APIC accesses */
 	spinlock_t mmu_lock;
+	spinlock_t requests_lock;
 	struct rw_semaphore slots_lock;
 	struct mm_struct *mm; /* userspace tied to this vm */
 	int nmemslots;
@@ -746,6 +746,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 	cpumask_clear(cpus);
 
 	me = get_cpu();
+	spin_lock(&kvm->requests_lock);
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		vcpu = kvm->vcpus[i];
 		if (!vcpu)
@@ -762,6 +763,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 		smp_call_function_many(cpus, ack_flush, NULL, 1);
 	else
 		called = false;
+	spin_unlock(&kvm->requests_lock);
 	put_cpu();
 	free_cpumask_var(cpus);
 	return called;
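With requests_lock held across the mark-request-and-IPI sequence, two concurrent broadcasters can no longer interleave and leave a vcpu unnotified. A typical caller has the shape of kvm_flush_remote_tlbs; the sketch below is written from memory rather than copied from kvm_main.c:

/* Sketch of a typical caller; the exact body may differ from kvm_main.c. */
void demo_flush_remote_tlbs(struct kvm *kvm)
{
	/* mark KVM_REQ_TLB_FLUSH on every vcpu and IPI the cpus running them */
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}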
@@ -982,6 +984,7 @@ static struct kvm *kvm_create_vm(void)
 	kvm->mm = current->mm;
 	atomic_inc(&kvm->mm->mm_count);
 	spin_lock_init(&kvm->mmu_lock);
+	spin_lock_init(&kvm->requests_lock);
 	kvm_io_bus_init(&kvm->pio_bus);
 	mutex_init(&kvm->lock);
 	kvm_io_bus_init(&kvm->mmio_bus);
@@ -1194,6 +1197,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		if (!new.dirty_bitmap)
 			goto out_free;
 		memset(new.dirty_bitmap, 0, dirty_bytes);
+		if (old.npages)
+			kvm_arch_flush_shadow(kvm);
 	}
 #endif /* not defined CONFIG_S390 */
 