Merge branch kvm-arm64/nv-eret-pauth into kvmarm-master/next
* kvm-arm64/nv-eret-pauth:
  : .
  : Add NV support for the ERETAA/ERETAB instructions. From the cover letter:
  :
  : "Although the current upstream NV support has *some* support for
  : correctly emulating ERET, that support is only partial as it doesn't
  : support the ERETAA and ERETAB variants.
  :
  : Supporting these instructions was cast aside for a long time as it
  : involves implementing some form of PAuth emulation, something I wasn't
  : overly keen on. But I have reached a point where enough of the
  : infrastructure is there that it actually makes sense. So here it is!"
  : .
  KVM: arm64: nv: Work around lack of pauth support in old toolchains
  KVM: arm64: Drop trapping of PAuth instructions/keys
  KVM: arm64: nv: Advertise support for PAuth
  KVM: arm64: nv: Handle ERETA[AB] instructions
  KVM: arm64: nv: Add emulation for ERETAx instructions
  KVM: arm64: nv: Add kvm_has_pauth() helper
  KVM: arm64: nv: Reinject PAC exceptions caused by HCR_EL2.API==0
  KVM: arm64: nv: Handle HCR_EL2.{API,APK} independently
  KVM: arm64: nv: Honor HFGITR_EL2.ERET being set
  KVM: arm64: nv: Fast-track 'InHost' exception returns
  KVM: arm64: nv: Add trap forwarding for ERET and SMC
  KVM: arm64: nv: Configure HCR_EL2 for FEAT_NV2
  KVM: arm64: nv: Drop VCPU_HYP_CONTEXT flag
  KVM: arm64: Constraint PAuth support to consistent implementations
  KVM: arm64: Add helpers for ESR_ELx_ERET_ISS_ERET*
  KVM: arm64: Harden __ctxt_sys_reg() against out-of-range values

Signed-off-by: Marc Zyngier <maz@kernel.org>
commit 2d38f43930
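One practical effect of this series, visible in the arm.c hunks further down, is that KVM_CAP_ARM_PTRAUTH_ADDRESS and KVM_CAP_ARM_PTRAUTH_GENERIC are now reported based on kvm_has_full_ptr_auth(). The following is a minimal userspace sketch, not part of this merge, of how a VMM would probe those capabilities and opt a vCPU into PAuth; it uses only the standard KVM UAPI and elides error handling and VM/vCPU creation.

/*
 * Illustrative only: probe the PAuth capabilities and request both
 * address and generic authentication at vCPU init time.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int vcpu_init_with_pauth(int vm_fd, int vcpu_fd)
{
	struct kvm_vcpu_init init;

	/* Ask the kernel for the preferred target for this host CPU */
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;

	/* Both capabilities must be advertised before asking for PAuth */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_PTRAUTH_ADDRESS) > 0 &&
	    ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_PTRAUTH_GENERIC) > 0) {
		init.features[0] |= 1U << KVM_ARM_VCPU_PTRAUTH_ADDRESS;
		init.features[0] |= 1U << KVM_ARM_VCPU_PTRAUTH_GENERIC;
	}

	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}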
@@ -404,6 +404,18 @@ static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
 	return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_ACCESS;
 }
 
+/* Indicate whether ESR.EC==0x1A is for an ERETAx instruction */
+static inline bool esr_iss_is_eretax(unsigned long esr)
+{
+	return esr & ESR_ELx_ERET_ISS_ERET;
+}
+
+/* Indicate which key is used for ERETAx (false: A-Key, true: B-Key) */
+static inline bool esr_iss_is_eretab(unsigned long esr)
+{
+	return esr & ESR_ELx_ERET_ISS_ERETA;
+}
+
 const char *esr_get_class_string(unsigned long esr);
 #endif /* __ASSEMBLY */
 
@@ -125,16 +125,6 @@ static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr_el2 |= HCR_TWI;
 }
 
-static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
-}
-
-static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
-}
-
 static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.vsesr_el2;
@@ -830,8 +830,6 @@ struct kvm_vcpu_arch {
 #define DEBUG_STATE_SAVE_SPE	__vcpu_single_flag(iflags, BIT(5))
 /* Save TRBE context if active */
 #define DEBUG_STATE_SAVE_TRBE	__vcpu_single_flag(iflags, BIT(6))
-/* vcpu running in HYP context */
-#define VCPU_HYP_CONTEXT	__vcpu_single_flag(iflags, BIT(7))
 
 /* SVE enabled for host EL0 */
 #define HOST_SVE_ENABLED	__vcpu_single_flag(sflags, BIT(0))
@@ -909,7 +907,7 @@ struct kvm_vcpu_arch {
  * Don't bother with VNCR-based accesses in the nVHE code, it has no
  * business dealing with NV.
  */
-static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
+static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
 {
 #if !defined (__KVM_NVHE_HYPERVISOR__)
 	if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
@@ -919,6 +917,13 @@ static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
 	return (u64 *)&ctxt->sys_regs[r];
 }
 
+#define __ctxt_sys_reg(c,r)						\
+	({								\
+		BUILD_BUG_ON(__builtin_constant_p(r) &&			\
+			     (r) >= NR_SYS_REGS);			\
+		___ctxt_sys_reg(c, r);					\
+	})
+
 #define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))
 
 u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
@@ -1370,4 +1375,19 @@ bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
 	(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) && \
 	 get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))
 
+/* Check for a given level of PAuth support */
+#define kvm_has_pauth(k, l)						\
+	({								\
+		bool pa, pi, pa3;					\
+									\
+		pa  = kvm_has_feat((k), ID_AA64ISAR1_EL1, APA, l);	\
+		pa &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPA, IMP);	\
+		pi  = kvm_has_feat((k), ID_AA64ISAR1_EL1, API, l);	\
+		pi &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPI, IMP);	\
+		pa3  = kvm_has_feat((k), ID_AA64ISAR2_EL1, APA3, l);	\
+		pa3 &= kvm_has_feat((k), ID_AA64ISAR2_EL1, GPA3, IMP);	\
+									\
+		(pa + pi + pa3) == 1;					\
+	})
+
 #endif /* __ARM64_KVM_HOST_H__ */
@@ -60,7 +60,20 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
 	return ttbr0 & ~GENMASK_ULL(63, 48);
 }
 
+extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
+
 int kvm_init_nv_sysregs(struct kvm *kvm);
 
+#ifdef CONFIG_ARM64_PTR_AUTH
+bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);
+#else
+static inline bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
+{
+	/* We really should never execute this... */
+	WARN_ON_ONCE(1);
+	*elr = 0xbad9acc0debadbad;
+	return false;
+}
+#endif
+
 #endif /* __ARM64_KVM_NESTED_H */
@@ -99,5 +99,26 @@ alternative_else_nop_endif
 .macro ptrauth_switch_to_hyp g_ctxt, h_ctxt, reg1, reg2, reg3
 .endm
 #endif /* CONFIG_ARM64_PTR_AUTH */
+
+#else /* !__ASSEMBLY */
+
+#define __ptrauth_save_key(ctxt, key)					\
+	do {								\
+	u64 __val;							\
+	__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);		\
+	ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;			\
+	__val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);		\
+	ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;			\
+} while(0)
+
+#define ptrauth_save_keys(ctxt)						\
+	do {								\
+	__ptrauth_save_key(ctxt, APIA);					\
+	__ptrauth_save_key(ctxt, APIB);					\
+	__ptrauth_save_key(ctxt, APDA);					\
+	__ptrauth_save_key(ctxt, APDB);					\
+	__ptrauth_save_key(ctxt, APGA);					\
+} while(0)
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_KVM_PTRAUTH_H */
@@ -297,6 +297,7 @@
 #define TCR_TBI1		(UL(1) << 38)
 #define TCR_HA			(UL(1) << 39)
 #define TCR_HD			(UL(1) << 40)
+#define TCR_TBID0		(UL(1) << 51)
 #define TCR_TBID1		(UL(1) << 52)
 #define TCR_NFD0		(UL(1) << 53)
 #define TCR_NFD1		(UL(1) << 54)
@@ -23,6 +23,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
 	 vgic/vgic-its.o vgic/vgic-debug.o
 
 kvm-$(CONFIG_HW_PERF_EVENTS)  += pmu-emul.o pmu.o
+kvm-$(CONFIG_ARM64_PTR_AUTH)  += pauth.o
 
 always-y := hyp_constants.h hyp-constants.s
 
@@ -35,10 +35,11 @@
 #include <asm/virt.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_nested.h>
 #include <asm/kvm_pkvm.h>
-#include <asm/kvm_emulate.h>
+#include <asm/kvm_ptrauth.h>
 #include <asm/sections.h>
 
 #include <kvm/arm_hypercalls.h>
|
||||
kvm_arm_teardown_hypercalls(kvm);
|
||||
}
|
||||
|
||||
static bool kvm_has_full_ptr_auth(void)
|
||||
{
|
||||
bool apa, gpa, api, gpi, apa3, gpa3;
|
||||
u64 isar1, isar2, val;
|
||||
|
||||
/*
|
||||
* Check that:
|
||||
*
|
||||
* - both Address and Generic auth are implemented for a given
|
||||
* algorithm (Q5, IMPDEF or Q3)
|
||||
* - only a single algorithm is implemented.
|
||||
*/
|
||||
if (!system_has_full_ptr_auth())
|
||||
return false;
|
||||
|
||||
isar1 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
|
||||
isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
|
||||
|
||||
apa = !!FIELD_GET(ID_AA64ISAR1_EL1_APA_MASK, isar1);
|
||||
val = FIELD_GET(ID_AA64ISAR1_EL1_GPA_MASK, isar1);
|
||||
gpa = (val == ID_AA64ISAR1_EL1_GPA_IMP);
|
||||
|
||||
api = !!FIELD_GET(ID_AA64ISAR1_EL1_API_MASK, isar1);
|
||||
val = FIELD_GET(ID_AA64ISAR1_EL1_GPI_MASK, isar1);
|
||||
gpi = (val == ID_AA64ISAR1_EL1_GPI_IMP);
|
||||
|
||||
apa3 = !!FIELD_GET(ID_AA64ISAR2_EL1_APA3_MASK, isar2);
|
||||
val = FIELD_GET(ID_AA64ISAR2_EL1_GPA3_MASK, isar2);
|
||||
gpa3 = (val == ID_AA64ISAR2_EL1_GPA3_IMP);
|
||||
|
||||
return (apa == gpa && api == gpi && apa3 == gpa3 &&
|
||||
(apa + api + apa3) == 1);
|
||||
}
|
||||
|
||||
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
||||
{
|
||||
int r;
|
||||
@ -311,7 +346,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
||||
break;
|
||||
case KVM_CAP_ARM_PTRAUTH_ADDRESS:
|
||||
case KVM_CAP_ARM_PTRAUTH_GENERIC:
|
||||
r = system_has_full_ptr_auth();
|
||||
r = kvm_has_full_ptr_auth();
|
||||
break;
|
||||
case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
|
||||
if (kvm)
|
||||
@@ -422,6 +457,44 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 
 }
 
+static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
+{
+	if (vcpu_has_ptrauth(vcpu)) {
+		/*
+		 * Either we're running an L2 guest, and the API/APK
+		 * bits come from L1's HCR_EL2, or API/APK are both set.
+		 */
+		if (unlikely(vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) {
+			u64 val;
+
+			val = __vcpu_sys_reg(vcpu, HCR_EL2);
+			val &= (HCR_API | HCR_APK);
+			vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
+			vcpu->arch.hcr_el2 |= val;
+		} else {
+			vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
+		}
+
+		/*
+		 * Save the host keys if there is any chance for the guest
+		 * to use pauth, as the entry code will reload the guest
+		 * keys in that case.
+		 * Protected mode is the exception to that rule, as the
+		 * entry into the EL2 code eagerly switches back and forth
+		 * between host and hyp keys (and kvm_hyp_ctxt is out of
+		 * reach anyway).
+		 */
+		if (is_protected_kvm_enabled())
+			return;
+
+		if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) {
+			struct kvm_cpu_context *ctxt;
+			ctxt = this_cpu_ptr_hyp_sym(kvm_hyp_ctxt);
+			ptrauth_save_keys(ctxt);
+		}
+	}
+}
+
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvm_s2_mmu *mmu;
|
||||
else
|
||||
vcpu_set_wfx_traps(vcpu);
|
||||
|
||||
if (vcpu_has_ptrauth(vcpu))
|
||||
vcpu_ptrauth_disable(vcpu);
|
||||
vcpu_set_pauth_traps(vcpu);
|
||||
|
||||
kvm_arch_vcpu_load_debug_state_flags(vcpu);
|
||||
|
||||
if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
|
||||
@ -1264,7 +1337,7 @@ static unsigned long system_supported_vcpu_features(void)
|
||||
if (!system_supports_sve())
|
||||
clear_bit(KVM_ARM_VCPU_SVE, &features);
|
||||
|
||||
if (!system_has_full_ptr_auth()) {
|
||||
if (!kvm_has_full_ptr_auth()) {
|
||||
clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features);
|
||||
clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features);
|
||||
}
|
||||
|
@@ -2117,6 +2117,26 @@ inject:
 	return true;
 }
 
+static bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit)
+{
+	bool control_bit_set;
+
+	if (!vcpu_has_nv(vcpu))
+		return false;
+
+	control_bit_set = __vcpu_sys_reg(vcpu, HCR_EL2) & control_bit;
+	if (!is_hyp_ctxt(vcpu) && control_bit_set) {
+		kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+		return true;
+	}
+	return false;
+}
+
+bool forward_smc_trap(struct kvm_vcpu *vcpu)
+{
+	return forward_traps(vcpu, HCR_TSC);
+}
+
 static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
 {
 	u64 mode = spsr & PSR_MODE_MASK;
@@ -2152,37 +2172,39 @@ static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
 
 void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
 {
-	u64 spsr, elr, mode;
-	bool direct_eret;
+	u64 spsr, elr, esr;
 
 	/*
-	 * Going through the whole put/load motions is a waste of time
-	 * if this is a VHE guest hypervisor returning to its own
-	 * userspace, or the hypervisor performing a local exception
-	 * return. No need to save/restore registers, no need to
-	 * switch S2 MMU. Just do the canonical ERET.
+	 * Forward this trap to the virtual EL2 if the virtual
+	 * HCR_EL2.NV bit is set and this is coming from !EL2.
 	 */
-	spsr = vcpu_read_sys_reg(vcpu, SPSR_EL2);
-	spsr = kvm_check_illegal_exception_return(vcpu, spsr);
-
-	mode = spsr & (PSR_MODE_MASK | PSR_MODE32_BIT);
-
-	direct_eret  = (mode == PSR_MODE_EL0t &&
-			vcpu_el2_e2h_is_set(vcpu) &&
-			vcpu_el2_tge_is_set(vcpu));
-	direct_eret |= (mode == PSR_MODE_EL2h || mode == PSR_MODE_EL2t);
-
-	if (direct_eret) {
-		*vcpu_pc(vcpu) = vcpu_read_sys_reg(vcpu, ELR_EL2);
-		*vcpu_cpsr(vcpu) = spsr;
-		trace_kvm_nested_eret(vcpu, *vcpu_pc(vcpu), spsr);
-		return;
-	}
+	if (forward_traps(vcpu, HCR_NV))
+		return;
+
+	/* Check for an ERETAx */
+	esr = kvm_vcpu_get_esr(vcpu);
+	if (esr_iss_is_eretax(esr) && !kvm_auth_eretax(vcpu, &elr)) {
+		/*
+		 * Oh no, ERETAx failed to authenticate. If we have
+		 * FPACCOMBINE, deliver an exception right away. If we
+		 * don't, then let the mangled ELR value trickle down the
+		 * ERET handling, and the guest will have a little surprise.
+		 */
+		if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE)) {
+			esr &= ESR_ELx_ERET_ISS_ERETA;
+			esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_FPAC);
+			kvm_inject_nested_sync(vcpu, esr);
+			return;
+		}
+	}
 
 	preempt_disable();
 	kvm_arch_vcpu_put(vcpu);
 
-	elr = __vcpu_sys_reg(vcpu, ELR_EL2);
 	spsr = __vcpu_sys_reg(vcpu, SPSR_EL2);
 	spsr = kvm_check_illegal_exception_return(vcpu, spsr);
+	if (!esr_iss_is_eretax(esr))
+		elr = __vcpu_sys_reg(vcpu, ELR_EL2);
 
 	trace_kvm_nested_eret(vcpu, elr, spsr);
 
@@ -55,6 +55,13 @@ static int handle_hvc(struct kvm_vcpu *vcpu)
 
 static int handle_smc(struct kvm_vcpu *vcpu)
 {
+	/*
+	 * Forward this trapped smc instruction to the virtual EL2 if
+	 * the guest has asked for it.
+	 */
+	if (forward_smc_trap(vcpu))
+		return 1;
+
 	/*
 	 * "If an SMC instruction executed at Non-secure EL1 is
 	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
@@ -207,19 +214,40 @@ static int handle_sve(struct kvm_vcpu *vcpu)
 }
 
 /*
- * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
- * a NOP). If we get here, it is that we didn't fixup ptrauth on exit, and all
- * that we can do is give the guest an UNDEF.
+ * Two possibilities to handle a trapping ptrauth instruction:
+ *
+ * - Guest usage of a ptrauth instruction (which the guest EL1 did not
+ *   turn into a NOP). If we get here, it is because we didn't enable
+ *   ptrauth for the guest. This results in an UNDEF, as it isn't
+ *   supposed to use ptrauth without being told it could.
+ *
+ * - Running an L2 NV guest while L1 has left HCR_EL2.API==0, and for
+ *   which we reinject the exception into L1.
+ *
+ * Anything else is an emulation bug (hence the WARN_ON + UNDEF).
  */
 static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
 {
+	if (!vcpu_has_ptrauth(vcpu)) {
+		kvm_inject_undefined(vcpu);
+		return 1;
+	}
+
+	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+		kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+		return 1;
+	}
+
+	/* Really shouldn't be here! */
+	WARN_ON_ONCE(1);
 	kvm_inject_undefined(vcpu);
 	return 1;
 }
 
 static int kvm_handle_eret(struct kvm_vcpu *vcpu)
 {
-	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_ERET_ISS_ERET)
+	if (esr_iss_is_eretax(kvm_vcpu_get_esr(vcpu)) &&
+	    !vcpu_has_ptrauth(vcpu))
 		return kvm_handle_ptrauth(vcpu);
 
 	/*
@@ -27,6 +27,7 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_nested.h>
+#include <asm/kvm_ptrauth.h>
 #include <asm/fpsimd.h>
 #include <asm/debug-monitors.h>
 #include <asm/processor.h>
@@ -271,10 +272,8 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
 	__deactivate_traps_hfgxtr(vcpu);
 }
 
-static inline void ___activate_traps(struct kvm_vcpu *vcpu)
+static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)
 {
-	u64 hcr = vcpu->arch.hcr_el2;
-
 	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
 		hcr |= HCR_TVM;
 
@@ -449,60 +448,6 @@ static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
 	return true;
 }
 
-static inline bool esr_is_ptrauth_trap(u64 esr)
-{
-	switch (esr_sys64_to_sysreg(esr)) {
-	case SYS_APIAKEYLO_EL1:
-	case SYS_APIAKEYHI_EL1:
-	case SYS_APIBKEYLO_EL1:
-	case SYS_APIBKEYHI_EL1:
-	case SYS_APDAKEYLO_EL1:
-	case SYS_APDAKEYHI_EL1:
-	case SYS_APDBKEYLO_EL1:
-	case SYS_APDBKEYHI_EL1:
-	case SYS_APGAKEYLO_EL1:
-	case SYS_APGAKEYHI_EL1:
-		return true;
-	}
-
-	return false;
-}
-
-#define __ptrauth_save_key(ctxt, key)					\
-	do {								\
-	u64 __val;							\
-	__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);		\
-	ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;			\
-	__val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);		\
-	ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;			\
-} while(0)
-
-DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
-
-static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
-{
-	struct kvm_cpu_context *ctxt;
-	u64 val;
-
-	if (!vcpu_has_ptrauth(vcpu))
-		return false;
-
-	ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
-	__ptrauth_save_key(ctxt, APIA);
-	__ptrauth_save_key(ctxt, APIB);
-	__ptrauth_save_key(ctxt, APDA);
-	__ptrauth_save_key(ctxt, APDB);
-	__ptrauth_save_key(ctxt, APGA);
-
-	vcpu_ptrauth_enable(vcpu);
-
-	val = read_sysreg(hcr_el2);
-	val |= (HCR_API | HCR_APK);
-	write_sysreg(val, hcr_el2);
-
-	return true;
-}
-
 static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *ctxt;
@@ -590,9 +535,6 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
 	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
 		return true;
 
-	if (esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
-		return kvm_hyp_handle_ptrauth(vcpu, exit_code);
-
 	if (kvm_hyp_handle_cntpct(vcpu))
 		return true;
 
@@ -40,7 +40,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 {
 	u64 val;
 
-	___activate_traps(vcpu);
+	___activate_traps(vcpu, vcpu->arch.hcr_el2);
 	__activate_traps_common(vcpu);
 
 	val = vcpu->arch.cptr_el2;
@@ -191,7 +191,6 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
 	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
-	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
 };
 
@@ -203,7 +202,6 @@ static const exit_handler_fn pvm_exit_handlers[] = {
 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
 	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
-	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
 };
 
@@ -33,11 +33,43 @@ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
 DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
 DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
 
+/*
+ * HCR_EL2 bits that the NV guest can freely change (no RES0/RES1
+ * semantics, irrespective of the configuration), but that cannot be
+ * applied to the actual HW as things would otherwise break badly.
+ *
+ * - TGE: we want the guest to use EL1, which is incompatible with
+ *   this bit being set
+ *
+ * - API/APK: they are already accounted for by vcpu_load(), and can
+ *   only take effect across a load/put cycle (such as ERET)
+ */
+#define NV_HCR_GUEST_EXCLUDE	(HCR_TGE | HCR_API | HCR_APK)
+
+static u64 __compute_hcr(struct kvm_vcpu *vcpu)
+{
+	u64 hcr = vcpu->arch.hcr_el2;
+
+	if (!vcpu_has_nv(vcpu))
+		return hcr;
+
+	if (is_hyp_ctxt(vcpu)) {
+		hcr |= HCR_NV | HCR_NV2 | HCR_AT | HCR_TTLB;
+
+		if (!vcpu_el2_e2h_is_set(vcpu))
+			hcr |= HCR_NV1;
+
+		write_sysreg_s(vcpu->arch.ctxt.vncr_array, SYS_VNCR_EL2);
+	}
+
+	return hcr | (__vcpu_sys_reg(vcpu, HCR_EL2) & ~NV_HCR_GUEST_EXCLUDE);
+}
+
 static void __activate_traps(struct kvm_vcpu *vcpu)
 {
 	u64 val;
 
-	___activate_traps(vcpu);
+	___activate_traps(vcpu, __compute_hcr(vcpu));
 
 	if (has_cntpoff()) {
 		struct timer_map map;
@@ -177,6 +209,59 @@ void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
 	host_data_ptr(host_ctxt)->__hyp_running_vcpu = NULL;
 }
 
+static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	u64 esr = kvm_vcpu_get_esr(vcpu);
+	u64 spsr, elr, mode;
+
+	/*
+	 * Going through the whole put/load motions is a waste of time
+	 * if this is a VHE guest hypervisor returning to its own
+	 * userspace, or the hypervisor performing a local exception
+	 * return. No need to save/restore registers, no need to
+	 * switch S2 MMU. Just do the canonical ERET.
+	 *
+	 * Unless the trap has to be forwarded further down the line,
+	 * of course...
+	 */
+	if ((__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_NV) ||
+	    (__vcpu_sys_reg(vcpu, HFGITR_EL2) & HFGITR_EL2_ERET))
+		return false;
+
+	spsr = read_sysreg_el1(SYS_SPSR);
+	mode = spsr & (PSR_MODE_MASK | PSR_MODE32_BIT);
+
+	switch (mode) {
+	case PSR_MODE_EL0t:
+		if (!(vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)))
+			return false;
+		break;
+	case PSR_MODE_EL2t:
+		mode = PSR_MODE_EL1t;
+		break;
+	case PSR_MODE_EL2h:
+		mode = PSR_MODE_EL1h;
+		break;
+	default:
+		return false;
+	}
+
+	/* If ERETAx fails, take the slow path */
+	if (esr_iss_is_eretax(esr)) {
+		if (!(vcpu_has_ptrauth(vcpu) && kvm_auth_eretax(vcpu, &elr)))
+			return false;
+	} else {
+		elr = read_sysreg_el1(SYS_ELR);
+	}
+
+	spsr = (spsr & ~(PSR_MODE_MASK | PSR_MODE32_BIT)) | mode;
+
+	write_sysreg_el2(spsr, SYS_SPSR);
+	write_sysreg_el2(elr, SYS_ELR);
+
+	return true;
+}
+
 static const exit_handler_fn hyp_exit_handlers[] = {
 	[0 ... ESR_ELx_EC_MAX]		= NULL,
 	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
@@ -186,7 +271,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
 	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
 	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
 	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
-	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
+	[ESR_ELx_EC_ERET]		= kvm_hyp_handle_eret,
 	[ESR_ELx_EC_MOPS]		= kvm_hyp_handle_mops,
 };
 
@@ -201,7 +286,7 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
 	 * If we were in HYP context on entry, adjust the PSTATE view
 	 * so that the usual helpers work correctly.
 	 */
-	if (unlikely(vcpu_get_flag(vcpu, VCPU_HYP_CONTEXT))) {
+	if (vcpu_has_nv(vcpu) && (read_sysreg(hcr_el2) & HCR_NV)) {
 		u64 mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);
 
 		switch (mode) {
@@ -243,11 +328,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	sysreg_restore_guest_state_vhe(guest_ctxt);
 	__debug_switch_to_guest(vcpu);
 
-	if (is_hyp_ctxt(vcpu))
-		vcpu_set_flag(vcpu, VCPU_HYP_CONTEXT);
-	else
-		vcpu_clear_flag(vcpu, VCPU_HYP_CONTEXT);
-
 	do {
 		/* Jump in the fire! */
 		exit_code = __guest_enter(vcpu);
@@ -35,13 +35,9 @@ static u64 limit_nv_id_reg(u32 id, u64 val)
 		break;
 
 	case SYS_ID_AA64ISAR1_EL1:
-		/* Support everything but PtrAuth and Spec Invalidation */
+		/* Support everything but Spec Invalidation */
 		val &= ~(GENMASK_ULL(63, 56) |
-			 NV_FTR(ISAR1, SPECRES) |
-			 NV_FTR(ISAR1, GPI) |
-			 NV_FTR(ISAR1, GPA) |
-			 NV_FTR(ISAR1, API) |
-			 NV_FTR(ISAR1, APA));
+			 NV_FTR(ISAR1, SPECRES));
 		break;
 
 	case SYS_ID_AA64PFR0_EL1:
arch/arm64/kvm/pauth.c (new file, 206 lines)
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 - Google LLC
+ * Author: Marc Zyngier <maz@kernel.org>
+ *
+ * Primitive PAuth emulation for ERETAA/ERETAB.
+ *
+ * This code assumes that it is run from EL2, and that it is part of
+ * the emulation of ERETAx for a guest hypervisor. That's a lot of
+ * baked-in assumptions and shortcuts.
+ *
+ * Do not reuse for anything else!
+ */
+
+#include <linux/kvm_host.h>
+
+#include <asm/gpr-num.h>
+#include <asm/kvm_emulate.h>
+#include <asm/pointer_auth.h>
+
+/* PACGA Xd, Xn, Xm */
+#define PACGA(d,n,m)						\
+	asm volatile(__DEFINE_ASM_GPR_NUMS			\
+		     ".inst 0x9AC03000          |"		\
+		     "(.L__gpr_num_%[Rd] << 0)  |"		\
+		     "(.L__gpr_num_%[Rn] << 5)  |"		\
+		     "(.L__gpr_num_%[Rm] << 16)\n"		\
+		     : [Rd] "=r" ((d))				\
+		     : [Rn] "r" ((n)), [Rm] "r" ((m)))
+
+static u64 compute_pac(struct kvm_vcpu *vcpu, u64 ptr,
+		       struct ptrauth_key ikey)
+{
+	struct ptrauth_key gkey;
+	u64 mod, pac = 0;
+
+	preempt_disable();
+
+	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
+		mod = __vcpu_sys_reg(vcpu, SP_EL2);
+	else
+		mod = read_sysreg(sp_el1);
+
+	gkey.lo = read_sysreg_s(SYS_APGAKEYLO_EL1);
+	gkey.hi = read_sysreg_s(SYS_APGAKEYHI_EL1);
+
+	__ptrauth_key_install_nosync(APGA, ikey);
+	isb();
+
+	PACGA(pac, ptr, mod);
+	isb();
+
+	__ptrauth_key_install_nosync(APGA, gkey);
+
+	preempt_enable();
+
+	/* PAC in the top 32bits */
+	return pac;
+}
+
+static bool effective_tbi(struct kvm_vcpu *vcpu, bool bit55)
+{
+	u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
+	bool tbi, tbid;
+
+	/*
+	 * Since we are authenticating an instruction address, we have
+	 * to take TBID into account. If E2H==0, ignore VA[55], as
+	 * TCR_EL2 only has a single TBI/TBID. If VA[55] was set in
+	 * this case, this is likely a guest bug...
+	 */
+	if (!vcpu_el2_e2h_is_set(vcpu)) {
+		tbi = tcr & BIT(20);
+		tbid = tcr & BIT(29);
+	} else if (bit55) {
+		tbi = tcr & TCR_TBI1;
+		tbid = tcr & TCR_TBID1;
+	} else {
+		tbi = tcr & TCR_TBI0;
+		tbid = tcr & TCR_TBID0;
+	}
+
+	return tbi && !tbid;
+}
+
+static int compute_bottom_pac(struct kvm_vcpu *vcpu, bool bit55)
+{
+	static const int maxtxsz = 39; // Revisit these two values once
+	static const int mintxsz = 16; // (if) we support TTST/LVA/LVA2
+	u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
+	int txsz;
+
+	if (!vcpu_el2_e2h_is_set(vcpu) || !bit55)
+		txsz = FIELD_GET(TCR_T0SZ_MASK, tcr);
+	else
+		txsz = FIELD_GET(TCR_T1SZ_MASK, tcr);
+
+	return 64 - clamp(txsz, mintxsz, maxtxsz);
+}
+
+static u64 compute_pac_mask(struct kvm_vcpu *vcpu, bool bit55)
+{
+	int bottom_pac;
+	u64 mask;
+
+	bottom_pac = compute_bottom_pac(vcpu, bit55);
+
+	mask = GENMASK(54, bottom_pac);
+	if (!effective_tbi(vcpu, bit55))
+		mask |= GENMASK(63, 56);
+
+	return mask;
+}
+
+static u64 to_canonical_addr(struct kvm_vcpu *vcpu, u64 ptr, u64 mask)
+{
+	bool bit55 = !!(ptr & BIT(55));
+
+	if (bit55)
+		return ptr | mask;
+
+	return ptr & ~mask;
+}
+
+static u64 corrupt_addr(struct kvm_vcpu *vcpu, u64 ptr)
+{
+	bool bit55 = !!(ptr & BIT(55));
+	u64 mask, error_code;
+	int shift;
+
+	if (effective_tbi(vcpu, bit55)) {
+		mask = GENMASK(54, 53);
+		shift = 53;
+	} else {
+		mask = GENMASK(62, 61);
+		shift = 61;
+	}
+
+	if (esr_iss_is_eretab(kvm_vcpu_get_esr(vcpu)))
+		error_code = 2 << shift;
+	else
+		error_code = 1 << shift;
+
+	ptr &= ~mask;
+	ptr |= error_code;
+
+	return ptr;
+}
+
+/*
+ * Authenticate an ERETAA/ERETAB instruction, returning true if the
+ * authentication succeeded and false otherwise. In all cases, *elr
+ * contains the VA to ERET to. Potential exception injection is left
+ * to the caller.
+ */
+bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
+{
+	u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL2);
+	u64 esr = kvm_vcpu_get_esr(vcpu);
+	u64 ptr, cptr, pac, mask;
+	struct ptrauth_key ikey;
+
+	*elr = ptr = vcpu_read_sys_reg(vcpu, ELR_EL2);
+
+	/* We assume we're already in the context of an ERETAx */
+	if (esr_iss_is_eretab(esr)) {
+		if (!(sctlr & SCTLR_EL1_EnIB))
+			return true;
+
+		ikey.lo = __vcpu_sys_reg(vcpu, APIBKEYLO_EL1);
+		ikey.hi = __vcpu_sys_reg(vcpu, APIBKEYHI_EL1);
+	} else {
+		if (!(sctlr & SCTLR_EL1_EnIA))
+			return true;
+
+		ikey.lo = __vcpu_sys_reg(vcpu, APIAKEYLO_EL1);
+		ikey.hi = __vcpu_sys_reg(vcpu, APIAKEYHI_EL1);
+	}
+
+	mask = compute_pac_mask(vcpu, !!(ptr & BIT(55)));
+	cptr = to_canonical_addr(vcpu, ptr, mask);
+
+	pac = compute_pac(vcpu, cptr, ikey);
+
+	/*
+	 * Slightly deviate from the pseudocode: if we have a PAC
+	 * match with the signed pointer, then it must be good.
+	 * Anything after this point is pure error handling.
+	 */
+	if ((pac & mask) == (ptr & mask)) {
+		*elr = cptr;
+		return true;
+	}
+
+	/*
+	 * Authentication failed, corrupt the canonical address if
+	 * PAuth2 isn't implemented, or some XORing if it is.
+	 */
+	if (!kvm_has_pauth(vcpu->kvm, PAuth2))
+		cptr = corrupt_addr(vcpu, cptr);
+	else
+		cptr = ptr ^ (pac & mask);
+
+	*elr = cptr;
+	return false;
+}