RISC-V: Add support for ordered booting method using HSM
Currently, all harts have to jump to Linux in RISC-V. This complicates the multi-stage boot process, as every transient stage also has to ensure that all harts enter that stage and jump to Linux afterwards. It also obstructs a clean kexec implementation.

The SBI HSM extension provides an alternative where only a single hart needs to boot and enter Linux; the booting hart can then bring up the secondary harts one by one. Add SBI HSM based cpu_ops that implement an ordered booting method in RISC-V. This change is also backward compatible with older firmware that does not implement the HSM extension: a newer kernel running on such firmware will continue to use the default spinwait booting method.

Signed-off-by: Atish Patra <atish.patra@wdc.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
This commit is contained in:
parent db5a794603
commit cfafe26013
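For context before the diff: the ordered boot described above rests on a single SBI call, HSM HART_START, which asks the SBI implementation (e.g. OpenSBI) to release one specific hart into S-mode at a given physical address. Below is a minimal freestanding sketch of that raw calling convention. It is not part of the patch (the patch goes through the kernel's sbi_ecall() wrapper); sbi_call() and hsm_hart_start() are hypothetical helpers, with the constants and register usage taken from the SBI v0.2 specification:

#define SBI_EXT_HSM             0x48534d        /* "HSM" in ASCII */
#define SBI_EXT_HSM_HART_START  0

struct sbiret {
        long error;     /* a0 on return: 0 on success, negative SBI error */
        long value;     /* a1 on return */
};

/* Hypothetical raw wrapper: a7 = extension id, a6 = function id, a0-a2 = args */
static struct sbiret sbi_call(unsigned long ext, unsigned long fid,
                              unsigned long arg0, unsigned long arg1,
                              unsigned long arg2)
{
        register unsigned long a0 asm("a0") = arg0;
        register unsigned long a1 asm("a1") = arg1;
        register unsigned long a2 asm("a2") = arg2;
        register unsigned long a6 asm("a6") = fid;
        register unsigned long a7 asm("a7") = ext;

        asm volatile("ecall"
                     : "+r" (a0), "+r" (a1)
                     : "r" (a2), "r" (a6), "r" (a7)
                     : "memory");

        return (struct sbiret){ .error = (long)a0, .value = (long)a1 };
}

/*
 * Ask the SBI implementation to start @hartid at physical address @saddr.
 * Per the spec, the started hart enters S-mode at @saddr with a0 = hartid
 * and a1 = @priv.
 */
static long hsm_hart_start(unsigned long hartid, unsigned long saddr,
                           unsigned long priv)
{
        struct sbiret ret = sbi_call(SBI_EXT_HSM, SBI_EXT_HSM_HART_START,
                                     hartid, saddr, priv);
        return ret.error;
}

That a0 = hartid contract is exactly what secondary_start_sbi in the head.S hunk below relies on when it indexes the per-hart boot data.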
arch/riscv/kernel/Makefile
@@ -46,5 +46,8 @@ obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_callchain.o
 obj-$(CONFIG_HAVE_PERF_REGS)	+= perf_regs.o
 obj-$(CONFIG_RISCV_SBI)	+= sbi.o
+ifeq ($(CONFIG_RISCV_SBI), y)
+obj-$(CONFIG_SMP) += cpu_ops_sbi.o
+endif
 
 clean:
arch/riscv/kernel/cpu_ops.c
@@ -18,6 +18,7 @@ const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
 void *__cpu_up_stack_pointer[NR_CPUS];
 void *__cpu_up_task_pointer[NR_CPUS];
 
+extern const struct cpu_operations cpu_ops_sbi;
 extern const struct cpu_operations cpu_ops_spinwait;
 
 void cpu_update_secondary_bootdata(unsigned int cpuid,
@@ -34,5 +35,12 @@ void cpu_update_secondary_bootdata(unsigned int cpuid,
 
 void __init cpu_set_ops(int cpuid)
 {
-	cpu_ops[cpuid] = &cpu_ops_spinwait;
+#if IS_ENABLED(CONFIG_RISCV_SBI)
+	if (sbi_probe_extension(SBI_EXT_HSM) > 0) {
+		if (!cpuid)
+			pr_info("SBI v0.2 HSM extension detected\n");
+		cpu_ops[cpuid] = &cpu_ops_sbi;
+	} else
+#endif
+		cpu_ops[cpuid] = &cpu_ops_spinwait;
 }
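The body of cpu_update_secondary_bootdata() falls outside the hunk above. For orientation, here is a sketch of what it is expected to do, inferred from the declarations in this file and the loads in head.S rather than taken from this patch: publish the idle task and its kernel stack, indexed by hartid, so the freshly started hart can load sp and tp.

/* Sketch only, not part of this diff: publish per-hart boot data. */
void cpu_update_secondary_bootdata(unsigned int cpuid,
                                   struct task_struct *tidle)
{
        int hartid = cpuid_to_hartid_map(cpuid);

        /* Ensure tidle's fields are visible before the pointers below. */
        smp_mb();
        WRITE_ONCE(__cpu_up_stack_pointer[hartid],
                   task_stack_page(tidle) + THREAD_SIZE);
        WRITE_ONCE(__cpu_up_task_pointer[hartid], tidle);
}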
arch/riscv/kernel/cpu_ops_sbi.c (new file, 81 lines)
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HSM extension and cpu_ops implementation.
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <asm/cpu_ops.h>
+#include <asm/sbi.h>
+#include <asm/smp.h>
+
+extern char secondary_start_sbi[];
+const struct cpu_operations cpu_ops_sbi;
+
+static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
+			      unsigned long priv)
+{
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_START,
+			hartid, saddr, priv, 0, 0, 0);
+	if (ret.error)
+		return sbi_err_map_linux_errno(ret.error);
+	else
+		return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int sbi_hsm_hart_stop(void)
+{
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STOP, 0, 0, 0, 0, 0, 0);
+
+	if (ret.error)
+		return sbi_err_map_linux_errno(ret.error);
+	else
+		return 0;
+}
+
+static int sbi_hsm_hart_get_status(unsigned long hartid)
+{
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STATUS,
+			hartid, 0, 0, 0, 0, 0);
+	if (ret.error)
+		return sbi_err_map_linux_errno(ret.error);
+	else
+		return ret.value;
+}
+#endif
+
+static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
+{
+	int rc;
+	unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
+	int hartid = cpuid_to_hartid_map(cpuid);
+
+	cpu_update_secondary_bootdata(cpuid, tidle);
+	rc = sbi_hsm_hart_start(hartid, boot_addr, 0);
+
+	return rc;
+}
+
+static int sbi_cpu_prepare(unsigned int cpuid)
+{
+	if (!cpu_ops_sbi.cpu_start) {
+		pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+const struct cpu_operations cpu_ops_sbi = {
+	.name		= "sbi",
+	.cpu_prepare	= sbi_cpu_prepare,
+	.cpu_start	= sbi_cpu_start,
+};
arch/riscv/kernel/head.S
@@ -99,11 +99,37 @@ relocate:
 	ret
 #endif /* CONFIG_MMU */
 #ifdef CONFIG_SMP
+	.global secondary_start_sbi
+secondary_start_sbi:
+	/* Mask all interrupts */
+	csrw CSR_IE, zero
+	csrw CSR_IP, zero
+
+	/* Load the global pointer */
+	.option push
+	.option norelax
+	la gp, __global_pointer$
+	.option pop
+
+	/*
+	 * Disable FPU to detect illegal usage of
+	 * floating point in kernel space
+	 */
+	li t0, SR_FS
+	csrc CSR_STATUS, t0
+
+	/* Set trap vector to spin forever to help debug */
+	la a3, .Lsecondary_park
+	csrw CSR_TVEC, a3
+
+	slli a3, a0, LGREG
+	la a4, __cpu_up_stack_pointer
+	la a5, __cpu_up_task_pointer
+	add a4, a3, a4
+	add a5, a3, a5
+	REG_L sp, (a4)
+	REG_L tp, (a5)
+
+	.global secondary_start_common
+secondary_start_common:
+
arch/riscv/kernel/smpboot.c
@@ -143,7 +143,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 /*
  * C entry point for a secondary processor.
  */
-asmlinkage __visible void __init smp_callin(void)
+asmlinkage __visible void smp_callin(void)
 {
 	struct mm_struct *mm = &init_mm;
 
arch/riscv/kernel/traps.c
@@ -148,7 +148,7 @@ int is_valid_bugaddr(unsigned long pc)
 }
 #endif /* CONFIG_GENERIC_BUG */
 
-void __init trap_init(void)
+void trap_init(void)
 {
 	/*
 	 * Set sup0 scratch register to 0, indicating to exception vector