x86/startup_64: Simplify virtual switch on primary boot
The secondary startup code is used on the primary boot path as well, but in this case, the initial part runs from a 1:1 mapping, until an explicit cross-jump is made to the kernel virtual mapping of the same code. On the secondary boot path, this jump is pointless as the code already executes from the mapping targeted by the jump. So combine this cross-jump with the jump from startup_64() into the common boot path.

This simplifies the execution flow, and clearly separates code that runs from a 1:1 mapping from code that runs from the kernel virtual mapping. Note that this requires a page table switch, so hoist the CR3 assignment into startup_64() as well. And since absolute symbol references will no longer be permitted in .head.text once we enable the associated build time checks, a RIP-relative memory operand is used in the JMP instruction, referring to an absolute constant in the .init.rodata section.

Given that the secondary startup code does not require a special placement inside the executable, move it to the .text section.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lore.kernel.org/r/20240227151907.387873-15-ardb+git@google.com
This commit is contained in:
parent
d6a41f184d
commit
8282639576
@@ -39,7 +39,6 @@ L4_START_KERNEL = l4_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
@@ -126,9 +125,21 @@ SYM_CODE_START_NOALIGN(startup_64)
	call	sev_verify_cbit
#endif

	jmp	1f
	/*
	 * Switch to early_top_pgt which still has the identity mappings
	 * present.
	 */
	movq	%rax, %cr3

	/* Branch to the common startup code at its kernel virtual address */
	ANNOTATE_RETPOLINE_SAFE
	jmp	*0f(%rip)
SYM_CODE_END(startup_64)

	__INITRODATA
0:	.quad	common_startup_64

	.text
SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR
@@ -174,8 +185,15 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
#ifdef CONFIG_AMD_MEM_ENCRYPT
	addq	sme_me_mask(%rip), %rax
#endif
	/*
	 * Switch to the init_top_pgt here, away from the trampoline_pgd and
	 * unmap the identity mapped ranges.
	 */
	movq	%rax, %cr3

1:
SYM_INNER_LABEL(common_startup_64, SYM_L_LOCAL)
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR

	/*
	 * Create a mask of CR4 bits to preserve. Omit PGE in order to flush
@@ -204,30 +222,12 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	btsl	$X86_CR4_PSE_BIT, %ecx
	movq	%rcx, %cr4

	/*
	 * Switch to new page-table
	 *
	 * For the boot CPU this switches to early_top_pgt which still has the
	 * identity mappings present. The secondary CPUs will switch to the
	 * init_top_pgt here, away from the trampoline_pgd and unmap the
	 * identity mapped ranges.
	 */
	movq	%rax, %cr3

	/*
	 * Set CR4.PGE to re-enable global translations.
	 */
	btsl	$X86_CR4_PGE_BIT, %ecx
	movq	%rcx, %cr4

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_END_OF_STACK
	ANNOTATE_NOENDBR // above

#ifdef CONFIG_SMP
	/*
	 * For parallel boot, the APIC ID is read from the APIC, and then
Loading…
Reference in New Issue
Block a user