// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/irq_work.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
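
/* The IPI messages a hart can send to its peers. */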
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_CPU_CRASH_STOP,
	IPI_IRQ_WORK,
	IPI_TIMER,
	IPI_MAX
};

unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = INVALID_HARTID
};
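
/* The boot hart always becomes logical CPU 0; record its hart ID up front. */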
void __init smp_setup_processor_id(void)
{
	cpuid_to_hartid_map(0) = boot_cpu_hartid;
}

static DEFINE_PER_CPU_READ_MOSTLY(int, ipi_dummy_dev);
static int ipi_virq_base __ro_after_init;
static int nr_ipi __ro_after_init = IPI_MAX;
static struct irq_desc *ipi_desc[IPI_MAX] __read_mostly;
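
/*
 * Translate a physical hart ID into its logical CPU number, or -ENOENT
 * if no CPU is mapped to @hartid.
 */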
int riscv_hartid_to_cpuid(unsigned long hartid)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpuid_to_hartid_map(i) == hartid)
			return i;

	return -ENOENT;
}
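
/* Mark the calling CPU offline and park it in a wait-for-interrupt loop. */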
static void ipi_stop(void)
{
	set_cpu_online(smp_processor_id(), false);
	while (1)
		wait_for_interrupt();
}

#ifdef CONFIG_KEXEC_CORE
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
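
/*
 * Save this CPU's registers for the crash dump, acknowledge the stop
 * request, then park the hart (handing it back to the platform when
 * CPU hotplug support is available).
 */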
static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
	crash_save_cpu(regs, cpu);

	atomic_dec(&waiting_for_crash_ipi);

	local_irq_disable();

#ifdef CONFIG_HOTPLUG_CPU
	if (cpu_has_hotplug(cpu))
		cpu_ops[cpu]->cpu_stop();
#endif

	for (;;)
		wait_for_interrupt();
}
#else
static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
	unreachable();
}
#endif
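
/* Thin wrappers: raise one IPI type on a mask of CPUs or on a single CPU. */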
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	__ipi_send_mask(ipi_desc[op], mask);
}

static void send_ipi_single(int cpu, enum ipi_message_type op)
{
	__ipi_send_mask(ipi_desc[op], cpumask_of(cpu));
}
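
/* Self-IPI so that pending irq_work runs from interrupt context. */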
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	send_ipi_single(smp_processor_id(), IPI_IRQ_WORK);
}
#endif
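
/* Demultiplex an incoming IPI; the message type is the offset from ipi_virq_base. */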
static irqreturn_t handle_IPI(int irq, void *data)
{
	int ipi = irq - ipi_virq_base;

	switch (ipi) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;
	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;
	case IPI_CPU_STOP:
		ipi_stop();
		break;
	case IPI_CPU_CRASH_STOP:
		ipi_cpu_crash_stop(smp_processor_id(), get_irq_regs());
		break;
	case IPI_IRQ_WORK:
		irq_work_run();
		break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		tick_receive_broadcast();
		break;
#endif
	default:
		pr_warn("CPU%d: unhandled IPI%d\n", smp_processor_id(), ipi);
		break;
	}

	return IRQ_HANDLED;
}
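
/* Enable (and, below, disable) all IPI interrupts on the calling CPU. */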
void riscv_ipi_enable(void)
{
	int i;

	if (WARN_ON_ONCE(!ipi_virq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		enable_percpu_irq(ipi_virq_base + i, 0);
}

void riscv_ipi_disable(void)
{
	int i;

	if (WARN_ON_ONCE(!ipi_virq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		disable_percpu_irq(ipi_virq_base + i);
}

bool riscv_ipi_have_virq_range(void)
{
	return (ipi_virq_base) ? true : false;
}

DEFINE_STATIC_KEY_FALSE(riscv_ipi_for_rfence);
EXPORT_SYMBOL_GPL(riscv_ipi_for_rfence);
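
/*
 * Register the Linux interrupt range used for IPIs: request a per-CPU
 * handler for each message type, enable the IPIs on the boot CPU, and
 * flip the riscv_ipi_for_rfence static key to say whether remote fences
 * should also be driven by these IPIs.
 */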
void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence)
{
	int i, err;

	if (WARN_ON(ipi_virq_base))
		return;

	WARN_ON(nr < IPI_MAX);
	nr_ipi = min(nr, IPI_MAX);
	ipi_virq_base = virq;

	/* Request IPIs */
	for (i = 0; i < nr_ipi; i++) {
		err = request_percpu_irq(ipi_virq_base + i, handle_IPI,
					 "IPI", &ipi_dummy_dev);
		WARN_ON(err);

		ipi_desc[i] = irq_to_desc(ipi_virq_base + i);
		irq_set_status_flags(ipi_virq_base + i, IRQ_HIDDEN);
	}

	/* Enable IPIs for the boot CPU immediately */
	riscv_ipi_enable();

	/* Update RFENCE static key */
	if (use_for_rfence)
		static_branch_enable(&riscv_ipi_for_rfence);
	else
		static_branch_disable(&riscv_ipi_for_rfence);
}

static const char * const ipi_names[] = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
	[IPI_CPU_CRASH_STOP]	= "CPU stop (for crash dump) interrupts",
	[IPI_IRQ_WORK]		= "IRQ work interrupts",
	[IPI_TIMER]		= "Timer broadcast interrupts",
};
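
/* Used for the IPI rows in /proc/interrupts: one row of per-CPU counts per IPI type. */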
void show_ipi_stats(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
		seq_printf(p, " %s\n", ipi_names[i]);
	}
}
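
/* Hooks used by the generic smp_call_function_*() machinery. */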
void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_single(cpu, IPI_CALL_FUNC);
}
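
/* Broadcast a timer tick; the targets handle it as IPI_TIMER above. */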
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_TIMER);
}
#endif
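
/* Ask all other online CPUs to park themselves, waiting up to one second for them to comply. */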
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		send_ipi_mask(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));
}

#ifdef CONFIG_KEXEC_CORE
/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */
static inline unsigned int num_other_online_cpus(void)
{
	unsigned int this_cpu_online = cpu_online(smp_processor_id());

	return num_online_cpus() - this_cpu_online;
}

void crash_smp_send_stop(void)
{
	static int cpus_stopped;
	cpumask_t mask;
	unsigned long timeout;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	cpus_stopped = 1;

	/*
	 * If this cpu is the only one alive at this point in time, online or
	 * not, there are no stop messages to be sent around, so just back out.
	 */
	if (num_other_online_cpus() == 0)
		return;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);

	atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());

	pr_crit("SMP: stopping secondary CPUs\n");
	send_ipi_mask(&mask, IPI_CPU_CRASH_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
		udelay(1);

	if (atomic_read(&waiting_for_crash_ipi) > 0)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(&mask));
}

bool smp_crash_stop_failed(void)
{
	return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif
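
/* Trigger a reschedule on @cpu; handled as IPI_RESCHEDULE above. */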
void arch_smp_send_reschedule(int cpu)
{
	send_ipi_single(cpu, IPI_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);