Merge branch 'svm' of https://github.com/kvm-x86/linux into HEAD
Clean up SVM's enter/exit assembly code so that it can be compiled without OBJECT_FILES_NON_STANDARD. The "standard" __svm_vcpu_run() can't be made 100% bulletproof, as RBP isn't restored on #VMEXIT, but that's also the case for __vmx_vcpu_run(), and getting "close enough" is better than not even trying.

As for SEV-ES, after yet another refresher on swap types, I realized KVM can simply let the hardware restore registers after #VMEXIT, all that's missing is storing the current values to the host save area (they are swap type B). This should provide 100% accuracy when using stack frames for unwinding, and requires less assembly.

In between, build the SEV-ES code iff CONFIG_KVM_AMD_SEV=y, and yank out "support" for 32-bit kernels in __svm_sev_es_vcpu_run, which was unnecessarily polluting the code for a configuration that is disabled at build time.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 44ecfa3e5f
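For context while reading the diff below, here is a minimal C sketch of the idea behind the new sev_es_host_save_area() helper: the SEV-ES host save state, which holds the swap type B GPRs that hardware reloads on #VMEXIT, lives 0x400 bytes into the per-CPU save-area page that KVM already hands to VMSAVE, so storing the current register values there before VMRUN is enough for hardware to restore them afterwards. The struct layout and field below are simplified stand-ins for illustration, not the kernel's actual definitions.

#include <stdint.h>

/* Hardware-defined SEV-ES save area layout; fields elided in this sketch. */
struct sev_es_save_area;

/* Simplified stand-in for the kernel's per-CPU struct svm_cpu_data. */
struct svm_cpu_data {
        void *save_area;        /* page handed to VMSAVE for host state */
};

/*
 * Mirrors the helper added to svm.c: the SEV-ES host save state starts
 * 0x400 bytes into the per-CPU save-area page.  __svm_sev_es_vcpu_run()
 * receives this pointer as its third argument and writes the callee-saved
 * GPRs there, after which #VMEXIT restores them in hardware.
 */
static struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
{
        return (struct sev_es_save_area *)((uint8_t *)sd->save_area + 0x400);
}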
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -3,11 +3,6 @@
 ccflags-y += -I $(srctree)/arch/x86/kvm
 ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
-ifeq ($(CONFIG_FRAME_POINTER),y)
-OBJECT_FILES_NON_STANDARD_vmx/vmenter.o := y
-OBJECT_FILES_NON_STANDARD_svm/vmenter.o := y
-endif
-
 include $(srctree)/virt/kvm/Makefile.kvm
 
 kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -434,7 +434,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
        /* Avoid using vmalloc for smaller buffers. */
        size = npages * sizeof(struct page *);
        if (size > PAGE_SIZE)
-               pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+               pages = __vmalloc(size, GFP_KERNEL_ACCOUNT);
        else
                pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
 
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1503,6 +1503,11 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
        __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
 }
 
+static struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
+{
+       return page_address(sd->save_area) + 0x400;
+}
+
 static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -1519,12 +1524,8 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
         * or subsequent vmload of host save area.
         */
        vmsave(sd->save_area_pa);
-       if (sev_es_guest(vcpu->kvm)) {
-               struct sev_es_save_area *hostsa;
-               hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
-
-               sev_es_prepare_switch_to_guest(hostsa);
-       }
+       if (sev_es_guest(vcpu->kvm))
+               sev_es_prepare_switch_to_guest(sev_es_host_save_area(sd));
 
        if (tsc_scaling)
                __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
@@ -4101,6 +4102,7 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 
 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
 {
+       struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
        struct vcpu_svm *svm = to_svm(vcpu);
 
        guest_state_enter_irqoff();
@@ -4108,7 +4110,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
        amd_clear_divider();
 
        if (sev_es_guest(vcpu->kvm))
-               __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
+               __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted,
+                                     sev_es_host_save_area(sd));
        else
                __svm_vcpu_run(svm, spec_ctrl_intercepted);
 
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -698,7 +698,8 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
 
 /* vmenter.S */
 
-void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
+void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted,
+                          struct sev_es_save_area *hostsa);
 void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
 
 #define DEFINE_KVM_GHCB_ACCESSORS(field) \
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -3,6 +3,7 @@
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/bitsperlong.h>
+#include <asm/frame.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/nospec-branch.h>
 #include "kvm-asm-offsets.h"
@@ -67,7 +68,7 @@
                "", X86_FEATURE_V_SPEC_CTRL
 901:
 .endm
-.macro RESTORE_HOST_SPEC_CTRL_BODY
+.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
 900:
        /* Same for after vmexit. */
        mov $MSR_IA32_SPEC_CTRL, %ecx
@@ -76,7 +77,7 @@
         * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
         * if it was not intercepted during guest execution.
         */
-       cmpb $0, (%_ASM_SP)
+       cmpb $0, \spec_ctrl_intercepted
        jnz 998f
        rdmsr
        movl %eax, SVM_spec_ctrl(%_ASM_DI)
@@ -99,6 +100,7 @@
  */
 SYM_FUNC_START(__svm_vcpu_run)
        push %_ASM_BP
+       mov %_ASM_SP, %_ASM_BP
 #ifdef CONFIG_X86_64
        push %r15
        push %r14
@@ -268,7 +270,7 @@ SYM_FUNC_START(__svm_vcpu_run)
        RET
 
        RESTORE_GUEST_SPEC_CTRL_BODY
-       RESTORE_HOST_SPEC_CTRL_BODY
+       RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)
 
 10:    cmpb $0, _ASM_RIP(kvm_rebooting)
        jne 2b
@@ -290,66 +292,68 @@ SYM_FUNC_START(__svm_vcpu_run)
 
 SYM_FUNC_END(__svm_vcpu_run)
 
+#ifdef CONFIG_KVM_AMD_SEV
+
+
+#ifdef CONFIG_X86_64
+#define SEV_ES_GPRS_BASE 0x300
+#define SEV_ES_RBX (SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
+#define SEV_ES_RBP (SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
+#define SEV_ES_RSI (SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
+#define SEV_ES_RDI (SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
+#define SEV_ES_R12 (SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
+#define SEV_ES_R13 (SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
+#define SEV_ES_R14 (SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
+#define SEV_ES_R15 (SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
+#endif
+
 /**
  * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
  * @svm: struct vcpu_svm *
  * @spec_ctrl_intercepted: bool
  */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
-       push %_ASM_BP
-#ifdef CONFIG_X86_64
-       push %r15
-       push %r14
-       push %r13
-       push %r12
-#else
-       push %edi
-       push %esi
-#endif
-       push %_ASM_BX
+       FRAME_BEGIN
 
        /*
-        * Save variables needed after vmexit on the stack, in inverse
-        * order compared to when they are needed.
+        * Save non-volatile (callee-saved) registers to the host save area.
+        * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
+        * saved on VMRUN.
         */
+       mov %rbp, SEV_ES_RBP (%rdx)
+       mov %r15, SEV_ES_R15 (%rdx)
+       mov %r14, SEV_ES_R14 (%rdx)
+       mov %r13, SEV_ES_R13 (%rdx)
+       mov %r12, SEV_ES_R12 (%rdx)
+       mov %rbx, SEV_ES_RBX (%rdx)
 
-       /* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
-       push %_ASM_ARG2
-
-       /* Save @svm. */
-       push %_ASM_ARG1
-
-.ifnc _ASM_ARG1, _ASM_DI
        /*
-        * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
-        * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
+        * Save volatile registers that hold arguments that are needed after
+        * #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
         */
-       mov %_ASM_ARG1, %_ASM_DI
-.endif
+       mov %rdi, SEV_ES_RDI (%rdx)
+       mov %rsi, SEV_ES_RSI (%rdx)
 
-       /* Clobbers RAX, RCX, RDX. */
+       /* Clobbers RAX, RCX, RDX (@hostsa). */
        RESTORE_GUEST_SPEC_CTRL
 
        /* Get svm->current_vmcb->pa into RAX. */
-       mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
-       mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
+       mov SVM_current_vmcb(%rdi), %rax
+       mov KVM_VMCB_pa(%rax), %rax
 
        /* Enter guest mode */
        sti
 
-1:     vmrun %_ASM_AX
+1:     vmrun %rax
 
 2:     cli
 
-       /* Pop @svm to RDI, guest registers have been saved already. */
-       pop %_ASM_DI
-
 #ifdef CONFIG_MITIGATION_RETPOLINE
        /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
-       FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+       FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif
 
-       /* Clobbers RAX, RCX, RDX. */
+       /* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
        RESTORE_HOST_SPEC_CTRL
 
        /*
@@ -361,30 +365,17 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
         */
        UNTRAIN_RET_VM
 
-       /* "Pop" @spec_ctrl_intercepted. */
-       pop %_ASM_BX
-
-       pop %_ASM_BX
-
-#ifdef CONFIG_X86_64
-       pop %r12
-       pop %r13
-       pop %r14
-       pop %r15
-#else
-       pop %esi
-       pop %edi
-#endif
-       pop %_ASM_BP
+       FRAME_END
        RET
 
        RESTORE_GUEST_SPEC_CTRL_BODY
-       RESTORE_HOST_SPEC_CTRL_BODY
+       RESTORE_HOST_SPEC_CTRL_BODY %sil
 
-3:     cmpb $0, _ASM_RIP(kvm_rebooting)
+3:     cmpb $0, kvm_rebooting(%rip)
        jne 2b
        ud2
 
        _ASM_EXTABLE(1b, 3b)
 
 SYM_FUNC_END(__svm_sev_es_vcpu_run)
+#endif /* CONFIG_KVM_AMD_SEV */