Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "More bugfixes, including a few remaining "make W=1" issues such as
  too large frame sizes on some configurations. On the ARM side, the
  compiler was messing up shadow stacks between EL1 and EL2 code,
  which is easily fixed with __always_inline"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: VMX: check descriptor table exits on instruction emulation
  kvm: x86: Limit the number of "kvm: disabled by bios" messages
  KVM: x86: avoid useless copy of cpufreq policy
  KVM: allow disabling -Werror
  KVM: x86: allow compiling as non-module with W=1
  KVM: Pre-allocate 1 cpumask variable per cpu for both pv tlb and pv ipis
  KVM: Introduce pv check helpers
  KVM: let declaration of kvm_get_running_vcpus match implementation
  KVM: SVM: allocate AVIC data structures based on kvm_amd module parameter
  arm64: Ask the compiler to __always_inline functions used by KVM at HYP
  KVM: arm64: Define our own swab32() to avoid a uapi static inline
  KVM: arm64: Ask the compiler to __always_inline functions used at HYP
  kvm: arm/arm64: Fold VHE entry/exit work into kvm_vcpu_run_vhe()
  KVM: arm/arm64: Fix up includes for trace.h
commit f853ed90e2
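Most of the arm64 side of this pull is mechanical: helpers called from the EL2 (hyp) code are promoted from static inline to static __always_inline, so the compiler can never emit an out-of-line copy built for EL1 that the hyp code would then have to call. As a rough illustration of the attribute involved, here is a minimal userspace sketch (not kernel code; the function names are invented for the example, and the macro only mirrors how the kernel spells the attribute):

#include <stdio.h>

/* The kernel's __always_inline is essentially this GCC/Clang attribute. */
#define __always_inline inline __attribute__((__always_inline__))

/* With plain "static inline" the compiler may still emit a real,
 * out-of-line copy of the function and call it. */
static inline int maybe_outlined(int x)
{
	return x * 2;
}

/* With __always_inline the body is folded into every caller, so no
 * separate copy of the function can exist. */
static __always_inline int never_outlined(int x)
{
	return x * 2;
}

int main(void)
{
	printf("%d %d\n", maybe_outlined(21), never_outlined(21));
	return 0;
}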
@@ -392,9 +392,6 @@ static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
 
-static inline void kvm_arm_vhe_guest_enter(void) {}
-static inline void kvm_arm_vhe_guest_exit(void) {}
-
 #define KVM_BP_HARDEN_UNKNOWN		-1
 #define KVM_BP_HARDEN_WA_NEEDED	0
 #define KVM_BP_HARDEN_NOT_REQUIRED	1
@@ -32,7 +32,7 @@ static inline void gic_write_eoir(u32 irq)
 	isb();
 }
 
-static inline void gic_write_dir(u32 irq)
+static __always_inline void gic_write_dir(u32 irq)
 {
 	write_sysreg_s(irq, SYS_ICC_DIR_EL1);
 	isb();
@@ -69,7 +69,7 @@ static inline int icache_is_aliasing(void)
 	return test_bit(ICACHEF_ALIASING, &__icache_flags);
 }
 
-static inline int icache_is_vpipt(void)
+static __always_inline int icache_is_vpipt(void)
 {
 	return test_bit(ICACHEF_VPIPT, &__icache_flags);
 }
@@ -145,7 +145,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-static inline void __flush_icache_all(void)
+static __always_inline void __flush_icache_all(void)
 {
 	if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
 		return;
@@ -435,13 +435,13 @@ cpuid_feature_extract_signed_field(u64 features, int field)
 	return cpuid_feature_extract_signed_field_width(features, field, 4);
 }
 
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
 cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
 {
 	return (u64)(features << (64 - width - field)) >> (64 - width);
 }
 
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
 cpuid_feature_extract_unsigned_field(u64 features, int field)
 {
 	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
@@ -564,7 +564,7 @@ static inline bool system_supports_mixed_endian(void)
 	return val == 0x1;
 }
 
-static inline bool system_supports_fpsimd(void)
+static __always_inline bool system_supports_fpsimd(void)
 {
 	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
 }
@@ -575,13 +575,13 @@ static inline bool system_uses_ttbr0_pan(void)
 		!cpus_have_const_cap(ARM64_HAS_PAN);
 }
 
-static inline bool system_supports_sve(void)
+static __always_inline bool system_supports_sve(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_SVE) &&
 		cpus_have_const_cap(ARM64_SVE);
 }
 
-static inline bool system_supports_cnp(void)
+static __always_inline bool system_supports_cnp(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_CNP) &&
 		cpus_have_const_cap(ARM64_HAS_CNP);
@@ -34,7 +34,7 @@ static inline void __raw_writew(u16 val, volatile void __iomem *addr)
 }
 
 #define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr)
 {
 	asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
 }
@@ -69,7 +69,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
 }
 
 #define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static __always_inline u32 __raw_readl(const volatile void __iomem *addr)
 {
 	u32 val;
 	asm volatile(ALTERNATIVE("ldr %w0, [%1]",
@@ -36,7 +36,7 @@ void kvm_inject_undef32(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 
-static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.hcr_el2 & HCR_RW);
 }
@@ -127,7 +127,7 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
 	vcpu->arch.vsesr_el2 = vsesr;
 }
 
-static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
 }
@@ -153,17 +153,17 @@ static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long
 	*__vcpu_elr_el1(vcpu) = v;
 }
 
-static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
 }
 
-static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
 {
 	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
 }
 
-static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu))
 		return kvm_condition_valid32(vcpu);
@@ -181,13 +181,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
  * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
  * AArch32 with banked registers.
  */
-static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
 					 u8 reg_num)
 {
 	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
 
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
 				unsigned long val)
 {
 	if (reg_num != 31)
@@ -264,12 +264,12 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	return mode != PSR_MODE_EL0t;
 }
 
-static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }
 
-static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
 	u32 esr = kvm_vcpu_get_hsr(vcpu);
 
@@ -279,12 +279,12 @@ static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 	return -1;
 }
 
-static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.far_el2;
 }
 
-static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
+static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
 {
 	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
 }
@@ -299,7 +299,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
-static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
 }
@@ -319,17 +319,17 @@ static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
 }
 
-static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
 	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
-static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
 }
 
-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
 		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
@@ -340,18 +340,18 @@ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
 }
 
-static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
 	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
-static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
 }
 
-static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
 	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
 }
@@ -361,17 +361,17 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
 }
 
-static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
 }
 
-static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
-static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 {
 	switch (kvm_vcpu_trap_get_fault(vcpu)) {
 	case FSC_SEA:
@@ -390,7 +390,7 @@ static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 	}
 }
 
-static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
 	u32 esr = kvm_vcpu_get_hsr(vcpu);
 	return ESR_ELx_SYS64_ISS_RT(esr);
@@ -504,7 +504,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
 	return data;		/* Leave LE untouched */
 }
 
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 {
 	if (vcpu_mode_is_32bit(vcpu))
 		kvm_skip_instr32(vcpu, is_wide_instr);
@@ -519,7 +519,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
  * Skip an instruction which has been emulated at hyp while most guest sysregs
  * are live.
  */
-static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
 	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
@@ -626,38 +626,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 static inline void kvm_clr_pmu_events(u32 clr) {}
 #endif
 
-static inline void kvm_arm_vhe_guest_enter(void)
-{
-	local_daif_mask();
-
-	/*
-	 * Having IRQs masked via PMR when entering the guest means the GIC
-	 * will not signal the CPU of interrupts of lower priority, and the
-	 * only way to get out will be via guest exceptions.
-	 * Naturally, we want to avoid this.
-	 *
-	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
-	 * dsb to ensure the redistributor is forwards EL2 IRQs to the CPU.
-	 */
-	pmr_sync();
-}
-
-static inline void kvm_arm_vhe_guest_exit(void)
-{
-	/*
-	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
-	 * and the GIC PMR if the host is using IRQ priorities.
-	 */
-	local_daif_restore(DAIF_PROCCTX_NOIRQ);
-
-	/*
-	 * When we exit from the guest we change a number of CPU configuration
-	 * parameters, such as traps. Make sure these changes take effect
-	 * before running the host or additional guests.
-	 */
-	isb();
-}
-
 #define KVM_BP_HARDEN_UNKNOWN		-1
 #define KVM_BP_HARDEN_WA_NEEDED	0
 #define KVM_BP_HARDEN_NOT_REQUIRED	1
@@ -47,6 +47,13 @@
 #define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
 #define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
 
+/*
+ * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
+ * static inline can allow the compiler to out-of-line this. KVM always wants
+ * the macro version as its always inlined.
+ */
+#define __kvm_swab32(x) ___constant_swab32(x)
+
 int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
 void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
@@ -93,7 +93,7 @@ void kvm_update_va_mask(struct alt_instr *alt,
 			__le32 *origptr, __le32 *updptr, int nr_inst);
 void kvm_compute_layout(void);
 
-static inline unsigned long __kern_hyp_va(unsigned long v)
+static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 {
 	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
 				    "ror %0, %0, #1\n"
@@ -473,6 +473,7 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 extern void *__kvm_bp_vect_base;
 extern int __kvm_harden_el2_vector_slot;
 
+/* This is only called on a VHE system */
 static inline void *kvm_get_hyp_vector(void)
 {
 	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
@@ -83,7 +83,7 @@ static inline bool is_kernel_in_hyp_mode(void)
 	return read_sysreg(CurrentEL) == CurrentEL_EL2;
 }
 
-static inline bool has_vhe(void)
+static __always_inline bool has_vhe(void)
 {
 	if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
 		return true;
@@ -625,7 +625,7 @@ static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
 }
 
 /* Switch to the guest for VHE systems running in EL2 */
-int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
@@ -678,7 +678,42 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 
 	return exit_code;
 }
-NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
+NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe);
+
+int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+{
+	int ret;
+
+	local_daif_mask();
+
+	/*
+	 * Having IRQs masked via PMR when entering the guest means the GIC
+	 * will not signal the CPU of interrupts of lower priority, and the
+	 * only way to get out will be via guest exceptions.
+	 * Naturally, we want to avoid this.
+	 *
+	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
+	 * dsb to ensure the redistributor is forwards EL2 IRQs to the CPU.
+	 */
+	pmr_sync();
+
+	ret = __kvm_vcpu_run_vhe(vcpu);
+
+	/*
+	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
+	 * and the GIC PMR if the host is using IRQ priorities.
+	 */
+	local_daif_restore(DAIF_PROCCTX_NOIRQ);
+
+	/*
+	 * When we exit from the guest we change a number of CPU configuration
+	 * parameters, such as traps. Make sure these changes take effect
+	 * before running the host or additional guests.
	 */
+	isb();
+
+	return ret;
+}
 
 /* Switch to the guest for legacy non-VHE systems */
 int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
@@ -69,14 +69,14 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
 		u32 data = vcpu_get_reg(vcpu, rd);
 		if (__is_be(vcpu)) {
 			/* guest pre-swabbed data, undo this for writel() */
-			data = swab32(data);
+			data = __kvm_swab32(data);
 		}
 		writel_relaxed(data, addr);
 	} else {
 		u32 data = readl_relaxed(addr);
 		if (__is_be(vcpu)) {
 			/* guest expects swabbed data */
-			data = swab32(data);
+			data = __kvm_swab32(data);
 		}
 		vcpu_set_reg(vcpu, rd, data);
 	}
@@ -425,7 +425,29 @@ static void __init sev_map_percpu_data(void)
 	}
 }
 
+static bool pv_tlb_flush_supported(void)
+{
+	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
+		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
+		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+}
+
+static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
+
 #ifdef CONFIG_SMP
+
+static bool pv_ipi_supported(void)
+{
+	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
+}
+
+static bool pv_sched_yield_supported(void)
+{
+	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
+		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
+		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+}
+
 #define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)
 
 static void __send_ipi_mask(const struct cpumask *mask, int vector)
@@ -490,12 +512,12 @@ static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
 static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 {
 	unsigned int this_cpu = smp_processor_id();
-	struct cpumask new_mask;
+	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 	const struct cpumask *local_mask;
 
-	cpumask_copy(&new_mask, mask);
-	cpumask_clear_cpu(this_cpu, &new_mask);
-	local_mask = &new_mask;
+	cpumask_copy(new_mask, mask);
+	cpumask_clear_cpu(this_cpu, new_mask);
+	local_mask = new_mask;
 	__send_ipi_mask(local_mask, vector);
 }
 
@@ -575,7 +597,6 @@ static void __init kvm_apf_trap_init(void)
 	update_intr_gate(X86_TRAP_PF, async_page_fault);
 }
 
-static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
 
 static void kvm_flush_tlb_others(const struct cpumask *cpumask,
 			const struct flush_tlb_info *info)
@@ -583,7 +604,7 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
 	u8 state;
 	int cpu;
 	struct kvm_steal_time *src;
-	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);
+	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 
 	cpumask_copy(flushmask, cpumask);
 	/*
@@ -619,11 +640,10 @@ static void __init kvm_guest_init(void)
 		pv_ops.time.steal_clock = kvm_steal_clock;
 	}
 
-	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+	if (pv_tlb_flush_supported()) {
 		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
 		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
+		pr_info("KVM setup pv remote TLB flush\n");
 	}
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -632,9 +652,7 @@ static void __init kvm_guest_init(void)
 #ifdef CONFIG_SMP
 	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
 	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
-	if (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
-	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+	if (pv_sched_yield_supported()) {
 		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
 		pr_info("KVM setup pv sched yield\n");
 	}
@@ -700,7 +718,7 @@ static uint32_t __init kvm_detect(void)
 static void __init kvm_apic_init(void)
 {
 #if defined(CONFIG_SMP)
-	if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
+	if (pv_ipi_supported())
 		kvm_setup_pv_ipi();
 #endif
 }
@@ -732,26 +750,31 @@ static __init int activate_jump_labels(void)
 }
 arch_initcall(activate_jump_labels);
 
-static __init int kvm_setup_pv_tlb_flush(void)
+static __init int kvm_alloc_cpumask(void)
 {
 	int cpu;
+	bool alloc = false;
 
 	if (!kvm_para_available() || nopv)
 		return 0;
 
-	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+	if (pv_tlb_flush_supported())
+		alloc = true;
+
+#if defined(CONFIG_SMP)
+	if (pv_ipi_supported())
+		alloc = true;
+#endif
+
+	if (alloc)
 		for_each_possible_cpu(cpu) {
-			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
+			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
 				GFP_KERNEL, cpu_to_node(cpu));
 		}
-		pr_info("KVM setup pv remote TLB flush\n");
-	}
 
 	return 0;
 }
-arch_initcall(kvm_setup_pv_tlb_flush);
+arch_initcall(kvm_alloc_cpumask);
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 
@@ -59,6 +59,19 @@ config KVM
 
 	  If unsure, say N.
 
+config KVM_WERROR
+	bool "Compile KVM with -Werror"
+	# KASAN may cause the build to fail due to larger frames
+	default y if X86_64 && !KASAN
+	# We use the dependency on !COMPILE_TEST to not be enabled
+	# blindly in allmodconfig or allyesconfig configurations
+	depends on (X86_64 && !KASAN) || !COMPILE_TEST
+	depends on EXPERT
+	help
+	  Add -Werror to the build flags for (and only for) i915.ko.
+
+	  If in doubt, say "N".
+
 config KVM_INTEL
 	tristate "KVM for Intel (and compatible) processors support"
 	depends on KVM && IA32_FEAT_CTL
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 
 ccflags-y += -Iarch/x86/kvm
+ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
 KVM := ../../../virt/kvm
 
@@ -57,11 +57,13 @@
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
+#ifdef MODULE
 static const struct x86_cpu_id svm_cpu_id[] = {
 	X86_FEATURE_MATCH(X86_FEATURE_SVM),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
+#endif
 
 #define IOPM_ALLOC_ORDER 2
 #define MSRPM_ALLOC_ORDER 1
@@ -2194,8 +2196,9 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 static int avic_init_vcpu(struct vcpu_svm *svm)
 {
 	int ret;
+	struct kvm_vcpu *vcpu = &svm->vcpu;
 
-	if (!kvm_vcpu_apicv_active(&svm->vcpu))
+	if (!avic || !irqchip_in_kernel(vcpu->kvm))
 		return 0;
 
 	ret = avic_init_backing_page(&svm->vcpu);
@@ -64,11 +64,13 @@
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
+#ifdef MODULE
 static const struct x86_cpu_id vmx_cpu_id[] = {
 	X86_FEATURE_MATCH(X86_FEATURE_VMX),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
+#endif
 
 bool __read_mostly enable_vpid = 1;
 module_param_named(vpid, enable_vpid, bool, 0444);
@@ -7175,6 +7177,7 @@ static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
 	else
 		intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
 
+	/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
 	return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
 }
 
@@ -7204,6 +7207,20 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
 	case x86_intercept_outs:
 		return vmx_check_intercept_io(vcpu, info);
 
+	case x86_intercept_lgdt:
+	case x86_intercept_lidt:
+	case x86_intercept_lldt:
+	case x86_intercept_ltr:
+	case x86_intercept_sgdt:
+	case x86_intercept_sidt:
+	case x86_intercept_sldt:
+	case x86_intercept_str:
+		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
+			return X86EMUL_CONTINUE;
+
+		/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
+		break;
+
 	/* TODO: check more intercepts... */
 	default:
 		break;
@@ -7190,15 +7190,15 @@ static void kvm_timer_init(void)
 
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
 #ifdef CONFIG_CPU_FREQ
-		struct cpufreq_policy policy;
+		struct cpufreq_policy *policy;
 		int cpu;
 
-		memset(&policy, 0, sizeof(policy));
 		cpu = get_cpu();
-		cpufreq_get_policy(&policy, cpu);
-		if (policy.cpuinfo.max_freq)
-			max_tsc_khz = policy.cpuinfo.max_freq;
+		policy = cpufreq_cpu_get(cpu);
+		if (policy && policy->cpuinfo.max_freq)
+			max_tsc_khz = policy->cpuinfo.max_freq;
 		put_cpu();
+		cpufreq_cpu_put(policy);
 #endif
 		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
 					  CPUFREQ_TRANSITION_NOTIFIER);
@@ -7308,12 +7308,12 @@ int kvm_arch_init(void *opaque)
 	}
 
 	if (!ops->cpu_has_kvm_support()) {
-		printk(KERN_ERR "kvm: no hardware support\n");
+		pr_err_ratelimited("kvm: no hardware support\n");
 		r = -EOPNOTSUPP;
 		goto out;
 	}
 	if (ops->disabled_by_bios()) {
-		printk(KERN_ERR "kvm: disabled by bios\n");
+		pr_err_ratelimited("kvm: disabled by bios\n");
 		r = -EOPNOTSUPP;
 		goto out;
 	}
@@ -1344,7 +1344,7 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 
 struct kvm_vcpu *kvm_get_running_vcpu(void);
-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 
 #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
 bool kvm_arch_has_irq_bypass(void);
@@ -742,9 +742,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		guest_enter_irqoff();
 
 		if (has_vhe()) {
-			kvm_arm_vhe_guest_enter();
 			ret = kvm_vcpu_run_vhe(vcpu);
-			kvm_arm_vhe_guest_exit();
 		} else {
 			ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
 		}
@@ -4,6 +4,7 @@
 
 #include <kvm/arm_arch_timer.h>
 #include <linux/tracepoint.h>
+#include <asm/kvm_arm.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm