x86/boot/compressed/64: Check SEV encryption in 64-bit boot-path
Check whether the hypervisor reported the correct C-bit when running as an
SEV guest. A wrong C-bit position could be used to leak sensitive data from
the guest to the hypervisor.

The check function lives in a separate file, arch/x86/kernel/sev_verify_cbit.S,
so that it can be re-used in the running kernel image.

[ bp: Massage. ]

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lkml.kernel.org/r/20201028164659.27002-4-joro@8bytes.org
commit 86ce43f7dd
parent ed7b895f3e
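For readers who want the idea in C before reading the assembly, here is a rough, illustrative paraphrase of the verification flow that the new sev_verify_cbit() routine (added below in arch/x86/kernel/sev_verify_cbit.S) implements. This is a sketch only: the function name and the helper declarations are local stand-ins for this example rather than the actual boot-code API, and the real implementation is the assembly in the new file.

/*
 * Illustrative C sketch only -- NOT the real implementation.
 * All externs and helpers below are hypothetical stand-ins.
 */
extern unsigned long sme_me_mask, sev_status, sev_check_data;
int rdrand(unsigned long *val);        /* hypothetical RDRAND wrapper   */
unsigned long read_cr3(void);          /* hypothetical CR3 accessors    */
void write_cr3(unsigned long cr3);
void halt(void);                       /* hypothetical HLT wrapper      */

unsigned long sev_verify_cbit_sketch(unsigned long new_cr3)
{
	unsigned long old_cr3, val;

	/* Only relevant when SEV is active (sme_me_mask and sev_status set). */
	if (!sme_me_mask || !sev_status)
		return new_cr3;

	/* RDRAND is mandatory for SEV guests; retry until it succeeds. */
	while (!rdrand(&val))
		;

	/* Write the random value through the currently active page table. */
	sev_check_data = val;

	/* Switch to the page table that is about to be loaded ... */
	old_cr3 = read_cr3();
	write_cr3(new_cr3);

	/*
	 * ... and read the value back. With a wrong C-bit position this
	 * read returns ciphertext and the comparison fails.
	 */
	if (sev_check_data != val) {
		for (;;)
			halt();	/* no forward progress with a wrong C-bit */
	}

	/* Restore the previous page table and hand the pointer back. */
	write_cr3(old_cr3);
	return new_cr3;
}

The real routine additionally clears CR4.PGE around the page-table switch so that stale global TLB entries cannot mask the test, and it trashes the stack pointer before the hlt loop so no interrupt or exception can resume execution.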
@@ -164,6 +164,7 @@ void initialize_identity_maps(void *rmode)
 	add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);
 
 	/* Load the new page-table. */
+	sev_verify_cbit(top_level_pgt);
 	write_cr3(top_level_pgt);
 }
 
@@ -68,6 +68,9 @@ SYM_FUNC_START(get_sev_encryption_bit)
 SYM_FUNC_END(get_sev_encryption_bit)
 
 	.code64
+
+#include "../../kernel/sev_verify_cbit.S"
+
 SYM_FUNC_START(set_sev_encryption_mask)
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 	push	%rbp
@@ -111,4 +114,5 @@ SYM_FUNC_END(set_sev_encryption_mask)
 	.balign	8
 SYM_DATA(sme_me_mask,		.quad 0)
 SYM_DATA(sev_status,		.quad 0)
+SYM_DATA(sev_check_data,	.quad 0)
 #endif
@@ -159,4 +159,6 @@ void boot_page_fault(void);
 void boot_stage1_vc(void);
 void boot_stage2_vc(void);
 
+unsigned long sev_verify_cbit(unsigned long cr3);
+
 #endif /* BOOT_COMPRESSED_MISC_H */
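As the new assembly file below shows, sev_verify_cbit() returns the page-table pointer it was given, so a caller can chain the check with the CR3 load. A minimal, hypothetical illustration of such a caller follows; the wrapper name is made up, and the actual call site added in the first hunk above instead calls sev_verify_cbit() and write_cr3() as two separate statements.

/* Hypothetical caller, for illustration only. */
static void load_verified_page_table(unsigned long top_level_pgt)
{
	/* Verify the C-bit first; the pointer comes back unchanged. */
	write_cr3(sev_verify_cbit(top_level_pgt));
}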
arch/x86/kernel/sev_verify_cbit.S (new file, 89 lines)
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * sev_verify_cbit.S - Code for verification of the C-bit position reported
+ *                     by the Hypervisor when running with SEV enabled.
+ *
+ * Copyright (c) 2020 Joerg Roedel (jroedel@suse.de)
+ *
+ * sev_verify_cbit() is called before switching to a new long-mode page-table
+ * at boot.
+ *
+ * Verify that the C-bit position is correct by writing a random value to
+ * an encrypted memory location while on the current page-table. Then it
+ * switches to the new page-table to verify the memory content is still the
+ * same. After that it switches back to the current page-table and when the
+ * check succeeded it returns. If the check failed the code invalidates the
+ * stack pointer and goes into a hlt loop. The stack-pointer is invalidated to
+ * make sure no interrupt or exception can get the CPU out of the hlt loop.
+ *
+ * New page-table pointer is expected in %rdi (first parameter)
+ *
+ */
+SYM_FUNC_START(sev_verify_cbit)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	/* First check if a C-bit was detected */
+	movq	sme_me_mask(%rip), %rsi
+	testq	%rsi, %rsi
+	jz	3f
+
+	/* sme_me_mask != 0 could mean SME or SEV - Check also for SEV */
+	movq	sev_status(%rip), %rsi
+	testq	%rsi, %rsi
+	jz	3f
+
+	/* Save CR4 in %rsi */
+	movq	%cr4, %rsi
+
+	/* Disable Global Pages */
+	movq	%rsi, %rdx
+	andq	$(~X86_CR4_PGE), %rdx
+	movq	%rdx, %cr4
+
+	/*
+	 * Verified that running under SEV - now get a random value using
+	 * RDRAND. This instruction is mandatory when running as an SEV guest.
+	 *
+	 * Don't bail out of the loop if RDRAND returns errors. It is better to
+	 * prevent forward progress than to work with a non-random value here.
+	 */
+1:	rdrand	%rdx
+	jnc	1b
+
+	/* Store value to memory and keep it in %rdx */
+	movq	%rdx, sev_check_data(%rip)
+
+	/* Backup current %cr3 value to restore it later */
+	movq	%cr3, %rcx
+
+	/* Switch to new %cr3 - This might unmap the stack */
+	movq	%rdi, %cr3
+
+	/*
+	 * Compare value in %rdx with memory location. If C-bit is incorrect
+	 * this would read the encrypted data and make the check fail.
+	 */
+	cmpq	%rdx, sev_check_data(%rip)
+
+	/* Restore old %cr3 */
+	movq	%rcx, %cr3
+
+	/* Restore previous CR4 */
+	movq	%rsi, %cr4
+
+	/* Check CMPQ result */
+	je	3f
+
+	/*
+	 * The check failed, prevent any forward progress to prevent ROP
+	 * attacks, invalidate the stack and go into a hlt loop.
+	 */
+	xorq	%rsp, %rsp
+	subq	$0x1000, %rsp
+2:	hlt
+	jmp	2b
+3:
+#endif
+	/* Return page-table pointer */
+	movq	%rdi, %rax
+	ret
+SYM_FUNC_END(sev_verify_cbit)