/*
* SMP initialisation and IPI support
* Based on arch / arm64 / kernel / smp . c
*
* Copyright ( C ) 2012 ARM Ltd .
* Copyright ( C ) 2015 Regents of the University of California
* Copyright ( C ) 2017 SiFive
*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation .
*
* This program is distributed in the hope that it will be useful ,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
* GNU General Public License for more details .
*/
# include <linux/module.h>
# include <linux/init.h>
# include <linux/kernel.h>
# include <linux/mm.h>
# include <linux/sched.h>
# include <linux/kernel_stat.h>
# include <linux/notifier.h>
# include <linux/cpu.h>
# include <linux/percpu.h>
# include <linux/delay.h>
# include <linux/err.h>
# include <linux/irq.h>
# include <linux/of.h>
# include <linux/sched/task_stack.h>
# include <linux/sched/mm.h>
# include <asm/irq.h>
# include <asm/mmu_context.h>
# include <asm/tlbflush.h>
# include <asm/sections.h>
# include <asm/sbi.h>
/*
 * Per-hart boot parameters.  Written by __cpu_up() on the boot hart and
 * read by secondary harts spinning in _start before they enter C code.
 * Indexed by hartid, hence NR_CPUS-sized (assumes hartid < NR_CPUS).
 */
void *__cpu_up_stack_pointer[NR_CPUS];
void *__cpu_up_task_pointer[NR_CPUS];

/* Completed by a secondary hart in smp_callin() once it is online. */
static DECLARE_COMPLETION(cpu_running);
/* Nothing to do here: the boot hart needs no extra preparation on RISC-V. */
void __init smp_prepare_boot_cpu ( void )
{
}
/*
 * Nothing to do before bringing up secondaries: the cpuid/hartid map and
 * the possible/present masks were already populated in setup_smp().
 */
void __init smp_prepare_cpus ( unsigned int max_cpus )
{
}
/*
 * Walk the devicetree CPU nodes and build the logical-cpuid <-> hartid map.
 *
 * The boot hart is already mapped to logical CPU 0 (cpuid_to_hartid_map(0)),
 * so it is only verified here; every other hart with a valid hartid is
 * assigned the next free logical cpuid and marked possible and present.
 * Nodes whose hartid cannot be determined are skipped.
 *
 * BUGs if the boot hart is not found in the devicetree.
 */
void __init setup_smp(void)
{
	struct device_node *dn;
	int hart;
	bool found_boot_cpu = false;
	int cpuid = 1;	/* logical id 0 is reserved for the boot hart */

	for_each_of_cpu_node(dn) {
		hart = riscv_of_processor_hartid(dn);
		if (hart < 0)
			continue;

		if (hart == cpuid_to_hartid_map(0)) {
			/* The boot hart must appear exactly once. */
			BUG_ON(found_boot_cpu);
			found_boot_cpu = true;
			continue;
		}

		/* Don't overrun the map if the DT has more harts than NR_CPUS. */
		if (cpuid >= NR_CPUS) {
			pr_warn("Invalid cpuid [%d] for hartid [%d]\n",
				cpuid, hart);
			break;
		}

		cpuid_to_hartid_map(cpuid) = hart;
		set_cpu_possible(cpuid, true);
		set_cpu_present(cpuid, true);
		cpuid++;
	}

	BUG_ON(!found_boot_cpu);
}
int __cpu_up ( unsigned int cpu , struct task_struct * tidle )
{
2019-02-22 22:41:35 +03:00
int ret = 0 ;
2018-10-02 22:15:05 +03:00
int hartid = cpuid_to_hartid_map ( cpu ) ;
2017-07-11 04:00:26 +03:00
tidle - > thread_info . cpu = cpu ;
/*
* On RISC - V systems , all harts boot on their own accord . Our _start
* selects the first hart to boot the kernel and causes the remainder
* of the harts to spin in a loop waiting for their stack pointer to be
* setup by that main hart . Writing __cpu_up_stack_pointer signals to
* the spinning harts that they can continue the boot process .
*/
smp_mb ( ) ;
2018-10-02 22:15:05 +03:00
WRITE_ONCE ( __cpu_up_stack_pointer [ hartid ] ,
2018-10-02 22:15:03 +03:00
task_stack_page ( tidle ) + THREAD_SIZE ) ;
2018-10-02 22:15:05 +03:00
WRITE_ONCE ( __cpu_up_task_pointer [ hartid ] , tidle ) ;
2017-07-11 04:00:26 +03:00
2019-02-22 22:41:35 +03:00
lockdep_assert_held ( & cpu_running ) ;
wait_for_completion_timeout ( & cpu_running ,
msecs_to_jiffies ( 1000 ) ) ;
2017-07-11 04:00:26 +03:00
2019-02-22 22:41:35 +03:00
if ( ! cpu_online ( cpu ) ) {
pr_crit ( " CPU%u: failed to come online \n " , cpu ) ;
ret = - EIO ;
}
return ret ;
2017-07-11 04:00:26 +03:00
}
/* Nothing to do once all CPUs are up on RISC-V. */
void __init smp_cpus_done ( unsigned int max_cpus )
{
}
/*
 * C entry point for a secondary processor.
 *
 * Runs on the secondary hart once _start has handed it the stack and task
 * pointers published by __cpu_up().  Marks the CPU online, signals the
 * boot hart via cpu_running, and enters the idle loop.  Never returns.
 */
asmlinkage void __init smp_callin(void)
{
	struct mm_struct *mm = &init_mm;

	/* All kernel threads share the same mm context. */
	mmgrab(mm);
	current->active_mm = mm;

	trap_init();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), 1);

	/*
	 * Remote TLB flushes are ignored while the CPU is offline, so emit
	 * a local TLB flush right now just in case.
	 */
	local_flush_tlb_all();

	/* Let __cpu_up() know we have arrived. */
	complete(&cpu_running);

	/*
	 * Disable preemption before enabling interrupts, so we don't try to
	 * schedule a CPU that hasn't actually started yet.
	 */
	preempt_disable();
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}