From 9134b5a4647e46a76dbedb3604f0dd1870e19169 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Mon, 9 Jan 2023 18:47:59 +0100
Subject: [PATCH] arm64: Always load shadow stack pointer directly from the
 task struct

commit 2198d07c509f1db4a1185d1f65aaada794c6ea59 upstream.

All occurrences of the scs_load macro load the value of the shadow call
stack pointer from the task which is current at that point. So instead
of taking a task struct register argument in the scs_load macro to
specify the task struct to load from, let's always reference the
current task directly. This should make it much harder to exploit any
instruction sequences reloading the shadow call stack pointer register
from memory.

Signed-off-by: Ard Biesheuvel
Acked-by: Mark Rutland
Reviewed-by: Kees Cook
Link: https://lore.kernel.org/r/20230109174800.3286265-2-ardb@kernel.org
Signed-off-by: Catalin Marinas
Signed-off-by: Ard Biesheuvel
Signed-off-by: Greg Kroah-Hartman
---
 arch/arm64/include/asm/scs.h | 7 ++++---
 arch/arm64/kernel/entry.S    | 4 ++--
 arch/arm64/kernel/head.S     | 2 +-
 3 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index eaa2cd92e4c1..7155055a5beb 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -9,15 +9,16 @@
 #ifdef CONFIG_SHADOW_CALL_STACK
 	scs_sp	.req	x18

-	.macro scs_load tsk, tmp
-	ldr	scs_sp, [\tsk, #TSK_TI_SCS_SP]
+	.macro scs_load_current
+	get_current_task scs_sp
+	ldr	scs_sp, [scs_sp, #TSK_TI_SCS_SP]
 	.endm

 	.macro scs_save tsk, tmp
 	str	scs_sp, [\tsk, #TSK_TI_SCS_SP]
 	.endm
 #else
-	.macro scs_load tsk, tmp
+	.macro scs_load_current
 	.endm

 	.macro scs_save tsk, tmp
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index d5bc1dbdd2fd..28d4cdeee5ae 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -221,7 +221,7 @@ alternative_else_nop_endif

 	ptrauth_keys_install_kernel tsk, x20, x22, x23

-	scs_load tsk, x20
+	scs_load_current
 	.else
 	add	x21, sp, #S_FRAME_SIZE
 	get_current_task tsk
@@ -1025,7 +1025,7 @@ SYM_FUNC_START(cpu_switch_to)
 	msr	sp_el0, x1
 	ptrauth_keys_install_kernel x1, x8, x9, x10
 	scs_save x0, x8
-	scs_load x1, x8
+	scs_load_current
 	ret
 SYM_FUNC_END(cpu_switch_to)
 NOKPROBE(cpu_switch_to)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index e1c25fa3b8e6..351ee64c7deb 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -747,7 +747,7 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
 	ldr	x2, [x0, #CPU_BOOT_TASK]
 	cbz	x2, __secondary_too_slow
 	msr	sp_el0, x2
-	scs_load x2, x3
+	scs_load_current
 	mov	x29, #0
 	mov	x30, #0
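
For context, a rough sketch of what the new scs_load_current macro expands to
at each call site, assuming get_current_task is the usual "mrs ..., sp_el0"
helper from <asm/assembler.h> and TSK_TI_SCS_SP is the generated asm-offset of
thread_info.scs_sp (both are assumptions about definitions outside this patch):

	// scs_load_current, expanded (scs_sp is an alias for x18)
	mrs	x18, sp_el0			// x18 = current task_struct; the kernel keeps it in sp_el0
	ldr	x18, [x18, #TSK_TI_SCS_SP]	// reload the shadow call stack pointer from current's thread_info

Because the caller no longer supplies a task register, the macro cannot be
pointed at an arbitrary task_struct: the shadow call stack pointer can only be
reloaded from whatever sp_el0 says is the current task, which is the hardening
the commit message describes.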