// SPDX-License-Identifier: GPL-2.0-only
/*
* SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
*/

#include <linux/arch_topology.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <asm/cpu_ops.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/sbi.h>
#include <asm/smp.h>

#include "head.h"

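/* Completed by the incoming CPU in smp_callin(); __cpu_up() waits on it. */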
static DECLARE_COMPLETION(cpu_running);

void __init smp_prepare_boot_cpu(void)
{
	init_cpu_topology();
}

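/*
 * Mark every non-boot hart as present. A hart is skipped if its cpu_ops
 * prepare hook exists and reports an error.
 */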
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpuid;
	int ret;

	/* This covers non-smp usecase mandated by "nosmp" option */
	if (max_cpus == 0)
		return;

	for_each_possible_cpu(cpuid) {
		if (cpuid == smp_processor_id())
			continue;
		if (cpu_ops[cpuid]->cpu_prepare) {
			ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
			if (ret)
				continue;
		}
		set_cpu_present(cpuid, true);
	}
}

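/*
 * Walk the devicetree CPU nodes, map each hart other than the boot hart
 * (always logical CPU 0) to a logical cpuid, and install cpu_ops for every
 * possible CPU.
 */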
void __init setup_smp(void)
{
	struct device_node *dn;
	int hart;
	bool found_boot_cpu = false;
	int cpuid = 1;

	cpu_set_ops(0);

	for_each_of_cpu_node(dn) {
		hart = riscv_of_processor_hartid(dn);
		if (hart < 0)
			continue;

		if (hart == cpuid_to_hartid_map(0)) {
			BUG_ON(found_boot_cpu);
			found_boot_cpu = 1;
			continue;
		}
		if (cpuid >= NR_CPUS) {
			pr_warn("Invalid cpuid [%d] for hartid [%d]\n",
				cpuid, hart);
			break;
		}

		cpuid_to_hartid_map(cpuid) = hart;
		cpuid++;
	}

	BUG_ON(!found_boot_cpu);

	if (cpuid > nr_cpu_ids)
		pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
			cpuid, nr_cpu_ids);

	for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
		if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
			cpu_set_ops(cpuid);
			set_cpu_possible(cpuid, true);
		}
	}
}

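/* Kick a secondary hart through its cpu_ops, handing it @tidle as its idle task. */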
static int start_secondary_cpu(int cpu, struct task_struct *tidle)
{
	if (cpu_ops[cpu]->cpu_start)
		return cpu_ops[cpu]->cpu_start(cpu, tidle);

	return -EOPNOTSUPP;
}

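/*
 * Start one secondary CPU and wait up to a second for it to reach
 * smp_callin() and mark itself online.
 */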
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = 0;

	tidle->thread_info.cpu = cpu;

	ret = start_secondary_cpu(cpu, tidle);
	if (!ret) {
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_crit("CPU%u: failed to start\n", cpu);
	}

	return ret;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * C entry point for a secondary processor.
 */
asmlinkage __visible void smp_callin(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int curr_cpuid = smp_processor_id();

	riscv_clear_ipi();

	/* All kernel threads share the same mm context. */
	mmgrab(mm);
	current->active_mm = mm;

	notify_cpu_starting(curr_cpuid);
	update_siblings_masks(curr_cpuid);
	set_cpu_online(curr_cpuid, 1);

	/*
	 * Remote TLB flushes are ignored while the CPU is offline, so emit
	 * a local TLB flush right now just in case.
	 */
	local_flush_tlb_all();

	complete(&cpu_running);

	/*
	 * Disable preemption before enabling interrupts, so we don't try to
	 * schedule a CPU that hasn't actually started yet.
	 */
	preempt_disable();
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}