// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/irq_work.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
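
/*
 * IPI message types; each is a single bit in a per-CPU pending word
 * (see ipi_data below).
 */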
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_TIMER,
	IPI_MAX
};
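
/*
 * Logical CPU id to hart id mapping, populated during early SMP setup.
 * Unset entries stay INVALID_HARTID.
 */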
unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = INVALID_HARTID
};
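
/* The boot CPU is always logical CPU 0; record the hart it booted on. */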
void __init smp_setup_processor_id(void)
{
	cpuid_to_hartid_map(0) = boot_cpu_hartid;
}

/* A collection of single bit ipi messages. */
static struct {
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
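
/* Translate a hart id to a logical CPU id; -ENOENT if no CPU has it. */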
int riscv_hartid_to_cpuid(int hartid)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpuid_to_hartid_map(i) == hartid)
			return i;

	pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
	return -ENOENT;
}
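
/* Used by the DT core to match a cpu node's reg property (hart id). */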
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpuid_to_hartid_map(cpu);
}

/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
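
/* Park this CPU: mark it offline and spin in wait_for_interrupt(). */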
static void ipi_stop(void)
{
	set_cpu_online(smp_processor_id(), false);
	while (1)
		wait_for_interrupt();
}
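
/*
 * IPI injection is delegated to the platform via riscv_ipi_ops,
 * registered at boot (e.g. by the SBI or CLINT code).
 */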
static const struct riscv_ipi_ops *ipi_ops __ro_after_init;

void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
{
	ipi_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);

void riscv_clear_ipi(void)
{
	if (ipi_ops && ipi_ops->ipi_clear)
		ipi_ops->ipi_clear();

	csr_clear(CSR_IP, IE_SIE);
}
EXPORT_SYMBOL_GPL(riscv_clear_ipi);
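
/* Set the operation bit on every target CPU, then trigger the IPIs. */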
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	smp_mb__before_atomic();
	for_each_cpu(cpu, mask)
		set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();

	if (ipi_ops && ipi_ops->ipi_inject)
		ipi_ops->ipi_inject(mask);
	else
		pr_warn("SMP: IPI inject method not available\n");
}

static void send_ipi_single(int cpu, enum ipi_message_type op)
{
	smp_mb__before_atomic();
	set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();

	if (ipi_ops && ipi_ops->ipi_inject)
		ipi_ops->ipi_inject(cpumask_of(cpu));
	else
		pr_warn("SMP: IPI inject method not available\n");
}
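
/* irq_work is delivered via a self-IPI to the local CPU. */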
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	send_ipi_single(smp_processor_id(), IPI_IRQ_WORK);
}
#endif
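
/*
 * Top-level IPI handler: atomically fetch and clear this CPU's pending
 * bits, then dispatch each requested operation until none remain.
 */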
void handle_IPI(struct pt_regs *regs)
{
	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
	unsigned long *stats = ipi_data[smp_processor_id()].stats;

	riscv_clear_ipi();

	while (true) {
		unsigned long ops;

		/* Order bit clearing and data access. */
		mb();

		ops = xchg(pending_ipis, 0);
		if (ops == 0)
			return;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		if (ops & (1 << IPI_CPU_STOP)) {
			stats[IPI_CPU_STOP]++;
			ipi_stop();
		}

		if (ops & (1 << IPI_IRQ_WORK)) {
			stats[IPI_IRQ_WORK]++;
			irq_work_run();
		}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
		if (ops & (1 << IPI_TIMER)) {
			stats[IPI_TIMER]++;
			tick_receive_broadcast();
		}
#endif
		BUG_ON((ops >> IPI_MAX) != 0);

		/* Order data access and bit testing. */
		mb();
	}
}

static const char * const ipi_names[] = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
	[IPI_IRQ_WORK]		= "IRQ work interrupts",
	[IPI_TIMER]		= "Timer broadcast interrupts",
};
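
/* Report per-CPU IPI counts, one row per IPI type (/proc/interrupts). */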
void show_ipi_stats(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
		seq_printf(p, " %s\n", ipi_names[i]);
	}
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_single(cpu, IPI_CALL_FUNC);
}
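
/* Deliver broadcast ticks to CPUs whose local timers stop in deep idle. */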
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_TIMER);
}
#endif
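
/*
 * Stop all other online CPUs via IPI_CPU_STOP and wait up to one
 * second for them to go offline.
 */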
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		send_ipi_mask(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));
}

void smp_send_reschedule(int cpu)
{
	send_ipi_single(cpu, IPI_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);