From a638b0461b58aa3205cd9d5f14d6f703d795b4af Mon Sep 17 00:00:00 2001
From: Sergey Matyukevich
Date: Thu, 23 May 2024 11:43:23 +0300
Subject: [PATCH 1/4] riscv: prevent pt_regs corruption for secondary idle threads

The top of the kernel thread stack should be reserved for pt_regs. However,
this is not the case for the idle threads of the secondary boot harts: their
stacks overlap with their pt_regs, so both may get corrupted.

A similar issue was fixed for the primary hart, see commit c7cdd96eca28
("riscv: prevent stack corruption by reserving task_pt_regs(p) early"), but
that fix was not propagated to the secondary harts.

The problem was noticed in CPU hotplug tests with V enabled. The function
smp_callin() stored several registers on the stack, corrupting the top of the
pt_regs structure, including the status field. As a result, the kernel
attempted to save or restore a nonexistent V context.

Fixes: 9a2451f18663 ("RISC-V: Avoid using per cpu array for ordered booting")
Fixes: 2875fe056156 ("RISC-V: Add cpu_ops and modify default booting method")
Signed-off-by: Sergey Matyukevich
Reviewed-by: Alexandre Ghiti
Link: https://lore.kernel.org/r/20240523084327.2013211-1-geomatsi@gmail.com
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/kernel/cpu_ops_sbi.c      | 2 +-
 arch/riscv/kernel/cpu_ops_spinwait.c | 3 +--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
index 1cc7df740edd..e6fbaaf54956 100644
--- a/arch/riscv/kernel/cpu_ops_sbi.c
+++ b/arch/riscv/kernel/cpu_ops_sbi.c
@@ -72,7 +72,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
 	/* Make sure tidle is updated */
 	smp_mb();
 	bdata->task_ptr = tidle;
-	bdata->stack_ptr = task_stack_page(tidle) + THREAD_SIZE;
+	bdata->stack_ptr = task_pt_regs(tidle);
 	/* Make sure boot data is updated */
 	smp_mb();
 	hsm_data = __pa(bdata);
diff --git a/arch/riscv/kernel/cpu_ops_spinwait.c b/arch/riscv/kernel/cpu_ops_spinwait.c
index 613872b0a21a..24869eb88908 100644
--- a/arch/riscv/kernel/cpu_ops_spinwait.c
+++ b/arch/riscv/kernel/cpu_ops_spinwait.c
@@ -34,8 +34,7 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
 
 	/* Make sure tidle is updated */
 	smp_mb();
-	WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid],
-		   task_stack_page(tidle) + THREAD_SIZE);
+	WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid], task_pt_regs(tidle));
 	WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle);
 }
 
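For context, the layout at stake can be sketched in a few lines of stand-alone
C. This is a simplified model with made-up sizes, not the kernel's actual
definitions: pt_regs is reserved at the very top of the thread stack, so the
boot stack pointer must start just below it, which is what switching from
task_stack_page(tidle) + THREAD_SIZE to task_pt_regs(tidle) achieves.

/* Illustrative sketch only; THREAD_SIZE and struct pt_regs are stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE	(16 * 1024)		/* assumed thread stack size */
struct pt_regs { uint64_t regs[36]; };		/* stand-in for the real struct */

int main(void)
{
	static uint8_t stack[THREAD_SIZE];

	/* pt_regs is reserved at the very top of the thread stack. */
	struct pt_regs *regs = (struct pt_regs *)(stack + THREAD_SIZE) - 1;

	/* Old secondary-boot SP: the top of the stack page itself. */
	uint8_t *sp_old = stack + THREAD_SIZE;
	/* New secondary-boot SP: just below the reserved pt_regs. */
	uint8_t *sp_new = (uint8_t *)regs;

	/* Anything pushed from sp_old downward lands inside pt_regs. */
	printf("pt_regs occupies [%p, %p)\n", (void *)regs, (void *)sp_old);
	printf("old sp = %p (overlaps pt_regs), new sp = %p (does not)\n",
	       (void *)sp_old, (void *)sp_new);
	return 0;
}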
From 7bed51617401dab2be930b13ed5aacf581f7c8ef Mon Sep 17 00:00:00 2001
From: Nam Cao
Date: Sun, 26 May 2024 13:01:04 +0200
Subject: [PATCH 2/4] riscv: enable HAVE_ARCH_HUGE_VMAP for XIP kernel

HAVE_ARCH_HUGE_VMAP also works for XIP kernels, so remove its dependency on
!XIP_KERNEL.

This also fixes a boot problem for XIP kernels introduced by the commit in
"Fixes:": that commit used huge page mappings for vmemmap, but huge page vmap
was not enabled for XIP kernels.

Fixes: ff172d4818ad ("riscv: Use hugepage mappings for vmemmap")
Signed-off-by: Nam Cao
Cc:
Reviewed-by: Alexandre Ghiti
Link: https://lore.kernel.org/r/20240526110104.470429-1-namcao@linutronix.de
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index b94176e25be1..0525ee2d63c7 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -106,7 +106,7 @@ config RISCV
 	select HAS_IOPORT if MMU
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
-	select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT && !XIP_KERNEL
+	select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
 	select HAVE_ARCH_KASAN if MMU && 64BIT

From 982a7eb97be685d1129c06671aed4c26d6919af4 Mon Sep 17 00:00:00 2001
From: Palmer Dabbelt
Date: Fri, 24 May 2024 11:56:00 -0700
Subject: [PATCH 3/4] Documentation: RISC-V: uabi: Only scalar misaligned loads are supported

We're stuck supporting scalar misaligned loads in userspace because they were
part of the ISA at the time we froze the uABI. That wasn't the case for vector
misaligned accesses, so depending on them unconditionally is a userspace bug.
All extant vector hardware traps on these misaligned accesses.

Reviewed-by: Conor Dooley
Link: https://lore.kernel.org/r/20240524185600.5919-1-palmer@rivosinc.com
Signed-off-by: Palmer Dabbelt
---
 Documentation/arch/riscv/uabi.rst | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/Documentation/arch/riscv/uabi.rst b/Documentation/arch/riscv/uabi.rst
index 54d199dce78b..2b420bab0527 100644
--- a/Documentation/arch/riscv/uabi.rst
+++ b/Documentation/arch/riscv/uabi.rst
@@ -65,4 +65,6 @@ the extension, or may have deliberately removed it from the listing.
 
 Misaligned accesses
 -------------------
-Misaligned accesses are supported in userspace, but they may perform poorly.
+Misaligned scalar accesses are supported in userspace, but they may perform
+poorly. Misaligned vector accesses are only supported if the Zicclsm extension
+is supported.
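The practical reading of the documentation change above can be illustrated
with a small stand-alone C example of our own (not from the kernel tree): a
32-bit read from an odd address. If the compiler emits a single misaligned lw
here (it may instead emit byte loads, depending on target options), the RISC-V
uABI guarantees it works in userspace, though it may be emulated and slow; no
such guarantee exists for misaligned vector accesses.

/* Our own illustration, not from the kernel tree. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
	uint32_t v;

	/* Read 4 bytes starting at a misaligned (odd) address. */
	memcpy(&v, buf + 1, sizeof(v));
	printf("0x%08x\n", v);
	return 0;
}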
From 1d84afaf02524d2558e8ca3ca169be2ef720380b Mon Sep 17 00:00:00 2001
From: Alexandre Ghiti
Date: Thu, 30 May 2024 16:55:46 +0200
Subject: [PATCH 4/4] riscv: Fix fully ordered LR/SC xchg[8|16]() implementations

The fully ordered versions of xchg[8|16]() using LR/SC lack the necessary
memory barriers to guarantee the ordering. Fix this by matching what is
already implemented in the fully ordered versions of cmpxchg() using LR/SC.

Suggested-by: Andrea Parri
Reported-by: Andrea Parri
Closes: https://lore.kernel.org/linux-riscv/ZlYbupL5XgzgA0MX@andrea/T/#u
Fixes: a8ed2b7a2c13 ("riscv/cmpxchg: Implement xchg for variables of size 1 and 2")
Signed-off-by: Alexandre Ghiti
Reviewed-by: Andrea Parri
Link: https://lore.kernel.org/r/20240530145546.394248-1-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/include/asm/cmpxchg.h | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index ddb002ed89de..808b4c78462e 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -10,7 +10,7 @@
 
 #include <asm/fence.h>
 
-#define __arch_xchg_masked(prepend, append, r, p, n)			\
+#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n)		\
 ({									\
 	u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3);			\
 	ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE;	\
@@ -25,7 +25,7 @@
 	       "0:	lr.w %0, %2\n"					\
 	       "	and  %1, %0, %z4\n"				\
 	       "	or   %1, %1, %z3\n"				\
-	       "	sc.w %1, %1, %2\n"				\
+	       "	sc.w" sc_sfx " %1, %1, %2\n"			\
 	       "	bnez %1, 0b\n"					\
 	       append							\
 	       : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b))	\
@@ -46,7 +46,8 @@
 	       : "memory");						\
 })
 
-#define _arch_xchg(ptr, new, sfx, prepend, append)			\
+#define _arch_xchg(ptr, new, sc_sfx, swap_sfx, prepend,			\
+		   sc_append, swap_append)				\
 ({									\
 	__typeof__(ptr) __ptr = (ptr);					\
 	__typeof__(*(__ptr)) __new = (new);				\
@@ -55,15 +56,15 @@
 	switch (sizeof(*__ptr)) {					\
 	case 1:								\
 	case 2:								\
-		__arch_xchg_masked(prepend, append,			\
+		__arch_xchg_masked(sc_sfx, prepend, sc_append,		\
 				   __ret, __ptr, __new);		\
 		break;							\
 	case 4:								\
-		__arch_xchg(".w" sfx, prepend, append,			\
+		__arch_xchg(".w" swap_sfx, prepend, swap_append,	\
 			    __ret, __ptr, __new);			\
 		break;							\
 	case 8:								\
-		__arch_xchg(".d" sfx, prepend, append,			\
+		__arch_xchg(".d" swap_sfx, prepend, swap_append,	\
 			    __ret, __ptr, __new);			\
 		break;							\
 	default:							\
@@ -73,16 +74,17 @@
 })
 
 #define arch_xchg_relaxed(ptr, x)					\
-	_arch_xchg(ptr, x, "", "", "")
+	_arch_xchg(ptr, x, "", "", "", "", "")
 
 #define arch_xchg_acquire(ptr, x)					\
-	_arch_xchg(ptr, x, "", "", RISCV_ACQUIRE_BARRIER)
+	_arch_xchg(ptr, x, "", "", "",					\
+		   RISCV_ACQUIRE_BARRIER, RISCV_ACQUIRE_BARRIER)
 
 #define arch_xchg_release(ptr, x)					\
-	_arch_xchg(ptr, x, "", RISCV_RELEASE_BARRIER, "")
+	_arch_xchg(ptr, x, "", "", RISCV_RELEASE_BARRIER, "", "")
 
 #define arch_xchg(ptr, x)						\
-	_arch_xchg(ptr, x, ".aqrl", "", "")
+	_arch_xchg(ptr, x, ".rl", ".aqrl", "", RISCV_FULL_BARRIER, "")
 
 #define xchg32(ptr, x)							\
 ({									\
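To make the effect of the new sc_sfx/sc_append parameters concrete, here is a
rough stand-alone sketch of our own (not the kernel macro itself, only
buildable on RISC-V, and assuming the usual little-endian byte lanes) of what
a fully ordered one-byte xchg expands to after this change: the
store-conditional carries a release annotation and a full fence follows the
successful exchange, mirroring the fully ordered LR/SC cmpxchg.

/* Rough sketch of a fully ordered byte xchg after this patch (our own
 * rendering; the kernel builds the real thing from _arch_xchg()). */
static inline unsigned char xchg8_fully_ordered(unsigned char *p,
						unsigned char newval)
{
	unsigned int *word = (unsigned int *)((unsigned long)p & ~0x3UL);
	unsigned int shift = ((unsigned long)p & 0x3UL) * 8;
	unsigned int mask = 0xffU << shift;
	unsigned int new32 = (unsigned int)newval << shift;
	unsigned int old, tmp;

	__asm__ __volatile__(
		"0:	lr.w	%0, %2\n"
		"	and	%1, %0, %4\n"	/* clear the target byte */
		"	or	%1, %1, %3\n"	/* insert the new byte */
		"	sc.w.rl	%1, %1, %2\n"	/* ".rl" is the new sc_sfx */
		"	bnez	%1, 0b\n"
		"	fence	rw, rw\n"	/* full barrier as sc_append */
		: "=&r" (old), "=&r" (tmp), "+A" (*word)
		: "r" (new32), "r" (~mask)
		: "memory");

	return (unsigned char)((old & mask) >> shift);
}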