Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "x86:

   - Fix SEV race condition

  ARM:

   - Fixes for the configuration of SVE/SME traps when hVHE mode is in
     use

   - Allow use of pKVM on systems with FF-A implementations that are
     v1.0 compatible

   - Request/release percpu IRQs (arch timer, vGIC maintenance)
     correctly when pKVM is in use

   - Fix function prototype after __kvm_host_psci_cpu_entry() rename

   - Skip to the next instruction when emulating writes to TCR_EL1 on
     AmpereOne systems

  Selftests:

   - Fix missing include"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  selftests/rseq: Fix build with undefined __weak
  KVM: SEV: remove ghcb variable declarations
  KVM: SEV: only access GHCB fields once
  KVM: SEV: snapshot the GHCB before accessing it
  KVM: arm64: Skip instruction after emulating write to TCR_EL1
  KVM: arm64: fix __kvm_host_psci_cpu_entry() prototype
  KVM: arm64: Fix resetting SME trap values on reset for (h)VHE
  KVM: arm64: Fix resetting SVE trap values on reset for hVHE
  KVM: arm64: Use the appropriate feature trap register when activating traps
  KVM: arm64: Helper to write to appropriate feature trap register based on mode
  KVM: arm64: Disable SME traps for (h)VHE at setup
  KVM: arm64: Use the appropriate feature trap register for SVE at EL2 setup
  KVM: arm64: Factor out code for checking (h)VHE mode into a macro
  KVM: arm64: Rephrase percpu enable/disable tracking in terms of hyp
  KVM: arm64: Fix hardware enable/disable flows for pKVM
  KVM: arm64: Allow pKVM on v1.0 compatible FF-A implementations
Linus Torvalds 2023-08-07 10:18:20 -07:00
commit a027b2eca0
11 changed files with 182 additions and 120 deletions

arch/arm64/include/asm/el2_setup.h

@@ -31,6 +31,13 @@
.Lskip_hcrx_\@:
.endm
/* Check if running in host at EL2 mode, i.e., (h)VHE. Jump to fail if not. */
.macro __check_hvhe fail, tmp
mrs \tmp, hcr_el2
and \tmp, \tmp, #HCR_E2H
cbz \tmp, \fail
.endm
/*
* Allow Non-secure EL1 and EL0 to access physical timer and counter.
* This is not necessary for VHE, since the host kernel runs in EL2,
@@ -43,9 +50,7 @@
*/
.macro __init_el2_timers
mov x0, #3 // Enable EL1 physical timers
mrs x1, hcr_el2
and x1, x1, #HCR_E2H
cbz x1, .LnVHE_\@
__check_hvhe .LnVHE_\@, x1
lsl x0, x0, #10
.LnVHE_\@:
msr cnthctl_el2, x0
@@ -139,15 +144,14 @@
/* Coprocessor traps */
.macro __init_el2_cptr
mrs x1, hcr_el2
and x1, x1, #HCR_E2H
cbz x1, .LnVHE_\@
__check_hvhe .LnVHE_\@, x1
mov x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN)
b .Lset_cptr_\@
msr cpacr_el1, x0
b .Lskip_set_cptr_\@
.LnVHE_\@:
mov x0, #0x33ff
.Lset_cptr_\@:
msr cptr_el2, x0 // Disable copro. traps to EL2
.Lskip_set_cptr_\@:
.endm
/* Disable any fine grained traps */
@@ -268,19 +272,19 @@
check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2
.Linit_sve_\@: /* SVE register access */
mrs x0, cptr_el2 // Disable SVE traps
mrs x1, hcr_el2
and x1, x1, #HCR_E2H
cbz x1, .Lcptr_nvhe_\@
__check_hvhe .Lcptr_nvhe_\@, x1
// VHE case
// (h)VHE case
mrs x0, cpacr_el1 // Disable SVE traps
orr x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
b .Lset_cptr_\@
msr cpacr_el1, x0
b .Lskip_set_cptr_\@
.Lcptr_nvhe_\@: // nVHE case
mrs x0, cptr_el2 // Disable SVE traps
bic x0, x0, #CPTR_EL2_TZ
.Lset_cptr_\@:
msr cptr_el2, x0
.Lskip_set_cptr_\@:
isb
mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector
msr_s SYS_ZCR_EL2, x1 // length for EL1.
@@ -289,9 +293,19 @@
check_override id_aa64pfr1, ID_AA64PFR1_EL1_SME_SHIFT, .Linit_sme_\@, .Lskip_sme_\@, x1, x2
.Linit_sme_\@: /* SME register access and priority mapping */
__check_hvhe .Lcptr_nvhe_sme_\@, x1
// (h)VHE case
mrs x0, cpacr_el1 // Disable SME traps
orr x0, x0, #(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN)
msr cpacr_el1, x0
b .Lskip_set_cptr_sme_\@
.Lcptr_nvhe_sme_\@: // nVHE case
mrs x0, cptr_el2 // Disable SME traps
bic x0, x0, #CPTR_EL2_TSM
msr cptr_el2, x0
.Lskip_set_cptr_sme_\@:
isb
mrs x1, sctlr_el2

arch/arm64/include/asm/kvm_asm.h

@@ -278,7 +278,7 @@ asmlinkage void __noreturn hyp_panic_bad_stack(void);
asmlinkage void kvm_unexpected_el2_exception(void);
struct kvm_cpu_context;
void handle_trap(struct kvm_cpu_context *host_ctxt);
asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on);
asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on);
void __noreturn __pkvm_init_finalise(void);
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
void kvm_patch_vector_branch(struct alt_instr *alt,

arch/arm64/include/asm/kvm_emulate.h

@@ -571,6 +571,14 @@ static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
return test_bit(feature, vcpu->arch.features);
}
static __always_inline void kvm_write_cptr_el2(u64 val)
{
if (has_vhe() || has_hvhe())
write_sysreg(val, cpacr_el1);
else
write_sysreg(val, cptr_el2);
}
static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
u64 val;
@@ -578,8 +586,16 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
if (has_vhe()) {
val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
CPACR_EL1_ZEN_EL1EN);
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN_EL1EN;
} else if (has_hvhe()) {
val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
if (!vcpu_has_sve(vcpu) ||
(vcpu->arch.fp_state != FP_STATE_GUEST_OWNED))
val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
} else {
val = CPTR_NVHE_EL2_RES1;
@@ -597,9 +613,6 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
u64 val = kvm_get_reset_cptr_el2(vcpu);
if (has_vhe() || has_hvhe())
write_sysreg(val, cpacr_el1);
else
write_sysreg(val, cptr_el2);
kvm_write_cptr_el2(val);
}
#endif /* __ARM64_KVM_EMULATE_H__ */

arch/arm64/kvm/arm.c

@@ -55,7 +55,7 @@ DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
static bool vgic_present, kvm_arm_initialised;
static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
bool is_kvm_arm_initialised(void)
@@ -1864,18 +1864,24 @@ static void cpu_hyp_reinit(void)
cpu_hyp_init_features();
}
static void _kvm_arch_hardware_enable(void *discard)
static void cpu_hyp_init(void *discard)
{
if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
if (!__this_cpu_read(kvm_hyp_initialized)) {
cpu_hyp_reinit();
__this_cpu_write(kvm_arm_hardware_enabled, 1);
__this_cpu_write(kvm_hyp_initialized, 1);
}
}
static void cpu_hyp_uninit(void *discard)
{
if (__this_cpu_read(kvm_hyp_initialized)) {
cpu_hyp_reset();
__this_cpu_write(kvm_hyp_initialized, 0);
}
}
int kvm_arch_hardware_enable(void)
{
int was_enabled;
/*
* Most calls to this function are made with migration
* disabled, but not with preemption disabled. The former is
@@ -1884,36 +1890,23 @@ int kvm_arch_hardware_enable(void)
*/
preempt_disable();
was_enabled = __this_cpu_read(kvm_arm_hardware_enabled);
_kvm_arch_hardware_enable(NULL);
cpu_hyp_init(NULL);
if (!was_enabled) {
kvm_vgic_cpu_up();
kvm_timer_cpu_up();
}
preempt_enable();
return 0;
}
static void _kvm_arch_hardware_disable(void *discard)
{
if (__this_cpu_read(kvm_arm_hardware_enabled)) {
cpu_hyp_reset();
__this_cpu_write(kvm_arm_hardware_enabled, 0);
}
}
void kvm_arch_hardware_disable(void)
{
if (__this_cpu_read(kvm_arm_hardware_enabled)) {
kvm_timer_cpu_down();
kvm_vgic_cpu_down();
}
if (!is_protected_kvm_enabled())
_kvm_arch_hardware_disable(NULL);
cpu_hyp_uninit(NULL);
}
#ifdef CONFIG_CPU_PM
@@ -1922,16 +1915,16 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
void *v)
{
/*
* kvm_arm_hardware_enabled is left with its old value over
* kvm_hyp_initialized is left with its old value over
* PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
* re-enable hyp.
*/
switch (cmd) {
case CPU_PM_ENTER:
if (__this_cpu_read(kvm_arm_hardware_enabled))
if (__this_cpu_read(kvm_hyp_initialized))
/*
* don't update kvm_arm_hardware_enabled here
* so that the hardware will be re-enabled
* don't update kvm_hyp_initialized here
* so that the hyp will be re-enabled
* when we resume. See below.
*/
cpu_hyp_reset();
@@ -1939,8 +1932,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
return NOTIFY_OK;
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
if (__this_cpu_read(kvm_arm_hardware_enabled))
/* The hardware was enabled before suspend. */
if (__this_cpu_read(kvm_hyp_initialized))
/* The hyp was enabled before suspend. */
cpu_hyp_reinit();
return NOTIFY_OK;
@@ -2021,7 +2014,7 @@ static int __init init_subsystems(void)
/*
* Enable hardware so that subsystem initialisation can access EL2.
*/
on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
on_each_cpu(cpu_hyp_init, NULL, 1);
/*
* Register CPU lower-power notifier
@@ -2059,7 +2052,7 @@ out:
hyp_cpu_pm_exit();
if (err || !is_protected_kvm_enabled())
on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
on_each_cpu(cpu_hyp_uninit, NULL, 1);
return err;
}
@@ -2097,7 +2090,7 @@ static int __init do_pkvm_init(u32 hyp_va_bits)
* The stub hypercalls are now disabled, so set our local flag to
* prevent a later re-init attempt in kvm_arch_hardware_enable().
*/
__this_cpu_write(kvm_arm_hardware_enabled, 1);
__this_cpu_write(kvm_hyp_initialized, 1);
preempt_enable();
return ret;

arch/arm64/kvm/hyp/include/hyp/switch.h

@@ -457,6 +457,7 @@ static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
*/
val &= ~(TCR_HD | TCR_HA);
write_sysreg_el1(val, SYS_TCR);
__kvm_skip_instr(vcpu);
return true;
}

arch/arm64/kvm/hyp/nvhe/ffa.c

@@ -705,7 +705,20 @@ int hyp_ffa_init(void *pages)
if (res.a0 == FFA_RET_NOT_SUPPORTED)
return 0;
if (res.a0 != FFA_VERSION_1_0)
/*
* Firmware returns the maximum supported version of the FF-A
* implementation. Check that the returned version is
* backwards-compatible with the hyp according to the rules in DEN0077A
* v1.1 REL0 13.2.1.
*
* Of course, things are never simple when dealing with firmware. v1.1
* broke ABI with v1.0 on several structures, which is itself
* incompatible with the aforementioned versioning scheme. The
* expectation is that v1.x implementations that do not support the v1.0
* ABI return NOT_SUPPORTED rather than a version number, according to
* DEN0077A v1.1 REL0 18.6.4.
*/
if (FFA_MAJOR_VERSION(res.a0) != 1)
return -EOPNOTSUPP;
arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
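The check above relies on how the FFA_VERSION result is encoded. A rough sketch of that encoding and the resulting compatibility test, assuming the DEN0077A layout (bit 31 zero, bits [30:16] major version, bits [15:0] minor version); the macro and function names below are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative helpers, assuming the DEN0077A FFA_VERSION layout. */
#define FFA_VERSION_MAJOR(v)	(((v) >> 16) & 0x7fff)	/* bits [30:16] */
#define FFA_VERSION_MINOR(v)	((v) & 0xffff)		/* bits [15:0] */

/*
 * Any v1.x implementation is accepted: minor revisions are required to stay
 * backwards-compatible, and firmware that broke the v1.0 ABI is expected to
 * answer NOT_SUPPORTED instead of a version number.
 */
static bool ffa_version_is_compatible(uint32_t version)
{
	return FFA_VERSION_MAJOR(version) == 1;
}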

arch/arm64/kvm/hyp/nvhe/switch.c

@@ -63,7 +63,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
__activate_traps_fpsimd32(vcpu);
}
write_sysreg(val, cptr_el2);
kvm_write_cptr_el2(val);
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {

arch/x86/kvm/svm/sev.c

@@ -2417,15 +2417,18 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
*/
memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap));
memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap));
svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb);
vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb);
vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb);
vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb);
if (ghcb_xcr0_is_valid(ghcb)) {
svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);
if (kvm_ghcb_xcr0_is_valid(svm)) {
vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
kvm_update_cpuid_runtime(vcpu);
}
@@ -2436,84 +2439,88 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
control->exit_code_hi = upper_32_bits(exit_code);
control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb);
/* Clear the valid entries fields */
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}
static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
{
return (((u64)control->exit_code_hi) << 32) | control->exit_code;
}
static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu;
struct ghcb *ghcb;
struct vmcb_control_area *control = &svm->vmcb->control;
struct kvm_vcpu *vcpu = &svm->vcpu;
u64 exit_code;
u64 reason;
ghcb = svm->sev_es.ghcb;
/*
* Retrieve the exit code now even though it may not be marked valid
* as it could help with debugging.
*/
exit_code = ghcb_get_sw_exit_code(ghcb);
exit_code = kvm_ghcb_get_sw_exit_code(control);
/* Only GHCB Usage code 0 is supported */
if (ghcb->ghcb_usage) {
if (svm->sev_es.ghcb->ghcb_usage) {
reason = GHCB_ERR_INVALID_USAGE;
goto vmgexit_err;
}
reason = GHCB_ERR_MISSING_INPUT;
if (!ghcb_sw_exit_code_is_valid(ghcb) ||
!ghcb_sw_exit_info_1_is_valid(ghcb) ||
!ghcb_sw_exit_info_2_is_valid(ghcb))
if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
!kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
!kvm_ghcb_sw_exit_info_2_is_valid(svm))
goto vmgexit_err;
switch (ghcb_get_sw_exit_code(ghcb)) {
switch (exit_code) {
case SVM_EXIT_READ_DR7:
break;
case SVM_EXIT_WRITE_DR7:
if (!ghcb_rax_is_valid(ghcb))
if (!kvm_ghcb_rax_is_valid(svm))
goto vmgexit_err;
break;
case SVM_EXIT_RDTSC:
break;
case SVM_EXIT_RDPMC:
if (!ghcb_rcx_is_valid(ghcb))
if (!kvm_ghcb_rcx_is_valid(svm))
goto vmgexit_err;
break;
case SVM_EXIT_CPUID:
if (!ghcb_rax_is_valid(ghcb) ||
!ghcb_rcx_is_valid(ghcb))
if (!kvm_ghcb_rax_is_valid(svm) ||
!kvm_ghcb_rcx_is_valid(svm))
goto vmgexit_err;
if (ghcb_get_rax(ghcb) == 0xd)
if (!ghcb_xcr0_is_valid(ghcb))
if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd)
if (!kvm_ghcb_xcr0_is_valid(svm))
goto vmgexit_err;
break;
case SVM_EXIT_INVD:
break;
case SVM_EXIT_IOIO:
if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
if (!ghcb_sw_scratch_is_valid(ghcb))
if (control->exit_info_1 & SVM_IOIO_STR_MASK) {
if (!kvm_ghcb_sw_scratch_is_valid(svm))
goto vmgexit_err;
} else {
if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
if (!ghcb_rax_is_valid(ghcb))
if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK))
if (!kvm_ghcb_rax_is_valid(svm))
goto vmgexit_err;
}
break;
case SVM_EXIT_MSR:
if (!ghcb_rcx_is_valid(ghcb))
if (!kvm_ghcb_rcx_is_valid(svm))
goto vmgexit_err;
if (ghcb_get_sw_exit_info_1(ghcb)) {
if (!ghcb_rax_is_valid(ghcb) ||
!ghcb_rdx_is_valid(ghcb))
if (control->exit_info_1) {
if (!kvm_ghcb_rax_is_valid(svm) ||
!kvm_ghcb_rdx_is_valid(svm))
goto vmgexit_err;
}
break;
case SVM_EXIT_VMMCALL:
if (!ghcb_rax_is_valid(ghcb) ||
!ghcb_cpl_is_valid(ghcb))
if (!kvm_ghcb_rax_is_valid(svm) ||
!kvm_ghcb_cpl_is_valid(svm))
goto vmgexit_err;
break;
case SVM_EXIT_RDTSCP:
@@ -2521,19 +2528,19 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
case SVM_EXIT_WBINVD:
break;
case SVM_EXIT_MONITOR:
if (!ghcb_rax_is_valid(ghcb) ||
!ghcb_rcx_is_valid(ghcb) ||
!ghcb_rdx_is_valid(ghcb))
if (!kvm_ghcb_rax_is_valid(svm) ||
!kvm_ghcb_rcx_is_valid(svm) ||
!kvm_ghcb_rdx_is_valid(svm))
goto vmgexit_err;
break;
case SVM_EXIT_MWAIT:
if (!ghcb_rax_is_valid(ghcb) ||
!ghcb_rcx_is_valid(ghcb))
if (!kvm_ghcb_rax_is_valid(svm) ||
!kvm_ghcb_rcx_is_valid(svm))
goto vmgexit_err;
break;
case SVM_VMGEXIT_MMIO_READ:
case SVM_VMGEXIT_MMIO_WRITE:
if (!ghcb_sw_scratch_is_valid(ghcb))
if (!kvm_ghcb_sw_scratch_is_valid(svm))
goto vmgexit_err;
break;
case SVM_VMGEXIT_NMI_COMPLETE:
@@ -2549,11 +2556,9 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
return 0;
vmgexit_err:
vcpu = &svm->vcpu;
if (reason == GHCB_ERR_INVALID_USAGE) {
vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
ghcb->ghcb_usage);
svm->sev_es.ghcb->ghcb_usage);
} else if (reason == GHCB_ERR_INVALID_EVENT) {
vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
exit_code);
@@ -2563,11 +2568,8 @@ vmgexit_err:
dump_ghcb(svm);
}
/* Clear the valid entries fields */
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
ghcb_set_sw_exit_info_1(ghcb, 2);
ghcb_set_sw_exit_info_2(ghcb, reason);
ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason);
/* Resume the guest to "return" the error code. */
return 1;
@@ -2586,7 +2588,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
*/
if (svm->sev_es.ghcb_sa_sync) {
kvm_write_guest(svm->vcpu.kvm,
ghcb_get_sw_scratch(svm->sev_es.ghcb),
svm->sev_es.sw_scratch,
svm->sev_es.ghcb_sa,
svm->sev_es.ghcb_sa_len);
svm->sev_es.ghcb_sa_sync = false;
@@ -2632,12 +2634,11 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
{
struct vmcb_control_area *control = &svm->vmcb->control;
struct ghcb *ghcb = svm->sev_es.ghcb;
u64 ghcb_scratch_beg, ghcb_scratch_end;
u64 scratch_gpa_beg, scratch_gpa_end;
void *scratch_va;
scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
scratch_gpa_beg = svm->sev_es.sw_scratch;
if (!scratch_gpa_beg) {
pr_err("vmgexit: scratch gpa not provided\n");
goto e_scratch;
@@ -2708,8 +2709,8 @@ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
return 0;
e_scratch:
ghcb_set_sw_exit_info_1(ghcb, 2);
ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
return 1;
}
@@ -2822,7 +2823,6 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_control_area *control = &svm->vmcb->control;
u64 ghcb_gpa, exit_code;
struct ghcb *ghcb;
int ret;
/* Validate the GHCB */
@@ -2847,20 +2847,18 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
}
svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
ghcb = svm->sev_es.ghcb_map.hva;
trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
exit_code = ghcb_get_sw_exit_code(ghcb);
trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb);
sev_es_sync_from_ghcb(svm);
ret = sev_es_validate_vmgexit(svm);
if (ret)
return ret;
sev_es_sync_from_ghcb(svm);
ghcb_set_sw_exit_info_1(ghcb, 0);
ghcb_set_sw_exit_info_2(ghcb, 0);
ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0);
ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0);
exit_code = kvm_ghcb_get_sw_exit_code(control);
switch (exit_code) {
case SVM_VMGEXIT_MMIO_READ:
ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
@@ -2898,13 +2896,13 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
break;
case 1:
/* Get AP jump table address */
ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table);
break;
default:
pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
control->exit_info_1);
ghcb_set_sw_exit_info_1(ghcb, 2);
ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
}
ret = 1;

arch/x86/kvm/svm/svm.h

@@ -190,10 +190,12 @@ struct vcpu_sev_es_state {
/* SEV-ES support */
struct sev_es_save_area *vmsa;
struct ghcb *ghcb;
u8 valid_bitmap[16];
struct kvm_host_map ghcb_map;
bool received_first_sipi;
/* SEV-ES scratch area support */
u64 sw_scratch;
void *ghcb_sa;
u32 ghcb_sa_len;
bool ghcb_sa_sync;
@@ -744,4 +746,28 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
#define DEFINE_KVM_GHCB_ACCESSORS(field) \
static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
{ \
return test_bit(GHCB_BITMAP_IDX(field), \
(unsigned long *)&svm->sev_es.valid_bitmap); \
} \
\
static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
{ \
return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0; \
} \
DEFINE_KVM_GHCB_ACCESSORS(cpl)
DEFINE_KVM_GHCB_ACCESSORS(rax)
DEFINE_KVM_GHCB_ACCESSORS(rcx)
DEFINE_KVM_GHCB_ACCESSORS(rdx)
DEFINE_KVM_GHCB_ACCESSORS(rbx)
DEFINE_KVM_GHCB_ACCESSORS(rsi)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
DEFINE_KVM_GHCB_ACCESSORS(xcr0)
#endif
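The accessors above answer validity queries from KVM's own copy of the bitmap (svm->sev_es.valid_bitmap) rather than from the guest-shared GHCB page, which is the core of the race fix: everything that will be trusted is copied once, and only the copy is checked and used afterwards. A minimal stand-alone sketch of that pattern, using simplified stand-in types rather than KVM's actual structures:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the guest-shared page; the guest may rewrite it at any time. */
struct shared_ghcb {
	uint64_t rax;
	uint64_t valid_bitmap;
};

/* Host-private snapshot, filled once per exit and trusted afterwards. */
struct ghcb_snapshot {
	uint64_t rax;
	uint64_t valid_bitmap;
};

/* Copy each field from the shared page exactly once. */
static void ghcb_snapshot_fill(struct ghcb_snapshot *snap,
			       const volatile struct shared_ghcb *ghcb)
{
	snap->rax = ghcb->rax;
	snap->valid_bitmap = ghcb->valid_bitmap;
}

/*
 * Every later check and use goes through the snapshot, so the guest cannot
 * flip a "valid" bit or change a register value between check and use.
 */
static bool snap_rax_is_valid(const struct ghcb_snapshot *snap)
{
	return snap->valid_bitmap & 1u;	/* bit position is arbitrary in this sketch */
}

static uint64_t snap_rax_if_valid(const struct ghcb_snapshot *snap)
{
	return snap_rax_is_valid(snap) ? snap->rax : 0;
}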

tools/testing/selftests/rseq/Makefile

@@ -4,8 +4,10 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
CLANG_FLAGS += -no-integrated-as
endif
top_srcdir = ../../../..
CFLAGS += -O2 -Wall -g -I./ $(KHDR_INCLUDES) -L$(OUTPUT) -Wl,-rpath=./ \
$(CLANG_FLAGS)
$(CLANG_FLAGS) -I$(top_srcdir)/tools/include
LDLIBS += -lpthread -ldl
# Own dependencies because we only want to build against 1st prerequisite, but

tools/testing/selftests/rseq/rseq.c

@@ -31,6 +31,8 @@
#include <sys/auxv.h>
#include <linux/auxvec.h>
#include <linux/compiler.h>
#include "../kselftest.h"
#include "rseq.h"