The nVHE KVM hyp drains and disables the SPE buffer before entering the
guest, as the EL1&0 translation regime is going to be loaded with that
of the guest.

But this operation is performed way too late, because:
  - The owning translation regime of the SPE buffer is transferred to
    EL2 (MDCR_EL2_E2PB == 0).
  - The guest Stage1 is loaded.

Thus the flush could use the host EL1 virtual address, but use the EL2
translations instead of host EL1, for writing out any cached data.

Fix this by moving the SPE buffer handling early enough. The restore
path is doing the right thing.

Fixes: 014c4c77aad7 ("KVM: arm64: Improve debug register save/restore flow")
Cc: stable@vger.kernel.org
Cc: Christoffer Dall <christoffer.dall@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Alexandru Elisei <alexandru.elisei@arm.com>
Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210302120345.3102874-1-suzuki.poulose@arm.com
Message-Id: <20210305185254.3730990-2-maz@kernel.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
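In practice the fix amounts to reordering the nVHE world switch so that the
host SPE buffer is drained and disabled while the host still owns the EL1&0
translation regime. The sketch below shows the intended ordering; it is a
simplified illustration rather than the literal diff, and any helper not
declared in the header that follows is named for illustration only:

	int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
	{
		u64 exit_code;

		/* ... host context set up ... */
		__sysreg_save_state_nvhe(host_ctxt);
		/*
		 * Drain and disable the host SPE buffer while the EL1&0
		 * regime and the buffer ownership still belong to the host.
		 */
		__debug_save_host_buffers_nvhe(vcpu);

		__activate_traps(vcpu);
		/*
		 * From here on, MDCR_EL2_E2PB == 0 hands the buffer over to
		 * EL2 and the guest Stage1 is loaded, so a host flush would
		 * no longer go through the host EL1 translations.
		 */
		__debug_switch_to_guest(vcpu);

		exit_code = __guest_enter(vcpu);

		/* ... exit handling, host sysregs restored ... */
		__debug_switch_to_host(vcpu);
		/* Re-enable the host SPE buffer only once the host is back. */
		__debug_restore_host_buffers_nvhe(vcpu);

		return exit_code;
	}

The kvm_hyp.h header below declares the __debug_save_host_buffers_nvhe() /
__debug_restore_host_buffers_nvhe() hooks that the nVHE switch code relies on.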
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/alternative.h>
#include <asm/sysreg.h>

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);
DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),		\
					 __mrs_s("%0", r##vh),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"),	\
					 __msr_s(r##vh, "%x0"),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)

/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
 */

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
#define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
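
/*
 * e.g. read_sysreg_el1(SYS_SCTLR) accesses SCTLR_EL12 when running with
 * VHE (ARM64_HAS_VIRT_HOST_EXTN) and SCTLR_EL1 otherwise; the ALTERNATIVE
 * above patches in the right encoding at boot.
 */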

/*
 * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
 * static inline can allow the compiler to out-of-line this. KVM always wants
 * the macro version as its always inlined.
 */
#define __kvm_swab32(x)	___constant_swab32(x)

int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);
#endif

#ifdef __KVM_NVHE_HYPERVISOR__
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
#else
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
#endif

void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);

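/*
 * On nVHE, the host SPE buffer must be drained and disabled before buffer
 * ownership is transferred to EL2 (MDCR_EL2_E2PB == 0) and before the
 * guest's Stage1 is loaded; the restore counterpart runs only once the
 * host context has been re-installed.
 */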
#ifdef __KVM_NVHE_HYPERVISOR__
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
#endif

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);

#ifndef __KVM_NVHE_HYPERVISOR__
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(void);
#endif

u64 __guest_enter(struct kvm_vcpu *vcpu);

bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);

void __noreturn hyp_panic(void);
#ifdef __KVM_NVHE_HYPERVISOR__
void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
#endif

#endif /* __ARM64_KVM_HYP_H__ */