// SPDX-License-Identifier: GPL-2.0-or-later
/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
** -grant (1/12/2001)
**
*/
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/kgdb.h>
#include <linux/sched/hotplug.h>

#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/tlbflush.h>

#include <asm/io.h>
#include <asm/irq.h>		/* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>

#undef DEBUG_SMP
#ifdef DEBUG_SMP
static int smp_debug_lvl = 0;
#define smp_debug(lvl, printargs...)		\
		if (lvl >= smp_debug_lvl)	\
			printk(printargs);
#else
#define smp_debug(lvl, ...)	do { } while(0)
#endif /* DEBUG_SMP */

volatile struct task_struct *smp_init_current_idle_task;

/* track which CPU is booting */
static volatile int cpu_now_booting;

static DEFINE_PER_CPU(spinlock_t, ipi_lock);

enum ipi_message_type {
	IPI_NOP = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_START,
	IPI_CPU_STOP,
	IPI_CPU_TEST,
#ifdef CONFIG_KGDB
	IPI_ENTER_KGDB,
#endif
};
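
/*
 * Note: each ipi_message_type value is used as a bit index into the target
 * CPU's cpuinfo_parisc::pending_ipi word. The sender sets "1 << op" in
 * ipi_send() below, and the receiver recovers the lowest pending message
 * with ffz(~ops) in ipi_interrupt(), so there can be at most BITS_PER_LONG
 * distinct message types.
 */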

/********** SMP inter processor interrupt and communication routines */

#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
**    *May* need this "hook" to register IPI handler
**    once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{
#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region

	if (cpu_online(cpuid))
	{
		switch_to_idle_task(current);
	}

	return;
}
#endif

/*
** Yoink this CPU from the runnable list...
**
*/
static void
halt_processor(void)
{
	/* REVISIT : redirect I/O Interrupts to another CPU? */
	/* REVISIT : does PM *know* this CPU isn't available? */
	set_cpu_online(smp_processor_id(), false);
	local_irq_disable();
	__pdc_cpu_rendezvous();
	for (;;)
		;
}
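
/*
 * A sketch of how halt_processor() is reached, assuming the IPI path in
 * this file: another CPU calls smp_send_stop(), which raises IPI_CPU_STOP
 * on every other online CPU, and ipi_interrupt() dispatches here. The
 * __pdc_cpu_rendezvous() firmware call parks the CPU back in PDC's
 * rendezvous loop; the empty for(;;) is only a safety net in case that
 * call ever returns.
 */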

irqreturn_t __irq_entry
ipi_interrupt(int irq, void *dev_id)
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
	unsigned long ops;
	unsigned long flags;

	for (;;) {
		spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);

		spin_lock_irqsave(lock, flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(lock, flags);

		mb(); /* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
			unsigned long which = ffz(~ops);

			ops &= ~(1 << which);

			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
				break;

			case IPI_RESCHEDULE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
				inc_irq_stat(irq_resched_count);
				scheduler_ipi();
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
				inc_irq_stat(irq_call_count);
				generic_smp_call_function_interrupt();
				break;

			case IPI_CPU_START:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
				halt_processor();
				break;

			case IPI_CPU_TEST:
				smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
				break;
#ifdef CONFIG_KGDB
			case IPI_ENTER_KGDB:
				smp_debug(100, KERN_DEBUG "CPU%d ENTER_KGDB\n", this_cpu);
				kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
				break;
#endif
			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				return IRQ_NONE;
			} /* Switch */

			/* before doing more, let in any pending interrupts */
			if (ops) {
				local_irq_enable();
				local_irq_disable();
			}
		} /* while (ops) */
	}
	return IRQ_HANDLED;
}

static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	p->pending_ipi |= 1 << op;
	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
	spin_unlock_irqrestore(lock, flags);
}
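
/*
 * The per-CPU ipi_lock taken here pairs with the one taken in
 * ipi_interrupt(): the sender publishes the message bit under the target's
 * lock, then writes the IPI vector number to the target CPU's HPA. That
 * MMIO write is the doorbell which (per the MEM_RENDEZ quote further down,
 * "an interrupt to EIR{0}") raises the external interrupt on the target,
 * whose handler then drains pending_ipi.
 */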

static void
send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	for_each_cpu(cpu, mask)
		ipi_send(cpu, op);
}

static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	BUG_ON(dest_cpu == NO_PROC_ID);

	ipi_send(dest_cpu, op);
}

static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	preempt_disable();
	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
	preempt_enable();
}
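
/*
 * The preempt_disable()/preempt_enable() pair above keeps the caller
 * pinned to one CPU for the duration of the loop, so smp_processor_id()
 * stays stable and we reliably skip (only) ourselves.
 */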

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(void)
{
	send_IPI_allbutself(IPI_ENTER_KGDB);
}
#endif

inline void
smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }

void
arch_smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }

void
smp_send_all_nop(void)
{
	send_IPI_allbutself(IPI_NOP);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC);
}
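
/*
 * The two hooks above are the arch back-ends of the generic cross-call
 * API. As an illustrative (hypothetical) example, a driver doing
 *
 *	static void do_work(void *info) { ... }
 *	...
 *	smp_call_function_single(1, do_work, NULL, 1);
 *
 * ends up in arch_send_call_function_single_ipi(1), which raises
 * IPI_CALL_FUNC on CPU 1; ipi_interrupt() on that CPU then runs do_work()
 * via generic_smp_call_function_interrupt().
 */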

/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void
smp_cpu_init(int cpunum)
{
	/* Set modes and Enable floating point coprocessor */
	init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Well, support 2.4 linux scheme as well. */
	if (cpu_online(cpunum)) {
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}

	notify_cpu_starting(cpunum);

	set_cpu_online(cpunum, true);

	/* Initialise the idle task for this CPU */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	init_IRQ();   /* make sure no IRQs are enabled or pending */
	start_cpu_itimer();
}

/*
 * Slaves start using C here. Indirectly called from smp_slave_stext.
 * Do what start_kernel() and main() do for boot strap processor (aka monarch)
 */
void smp_callin(unsigned long pdce_proc)
{
	int slave_id = cpu_now_booting;

#ifdef CONFIG_64BIT
	WARN_ON(((unsigned long)(PAGE0->mem_pdc_hi) << 32
			| PAGE0->mem_pdc) != pdce_proc);
#endif

	smp_cpu_init(slave_id);

	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	local_irq_enable();  /* Interrupts have been off until now */

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	/* NOTREACHED */
	panic("smp_callin() AAAAaaaaahhhh....\n");
}

/*
 * Bring one cpu online.
 */
static int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
	long timeout;

#ifdef CONFIG_HOTPLUG_CPU
	int i;

	/* reset irq statistics for this CPU */
	memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t));
	for (i = 0; i < NR_IRQS; i++) {
		struct irq_desc *desc = irq_to_desc(i);

		if (desc && desc->kstat_irqs)
			*per_cpu_ptr(desc->kstat_irqs, cpuid) = 0;
	}
#endif

	/* wait until last booting CPU has started. */
	while (cpu_now_booting)
		;
	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[], cpu_data[])
	*/
	cpu_now_booting = cpuid;

	/*
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);

	/*
	** This gets PDC to release the CPU from a very tight loop.
	**
	** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
	** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
	** is executed after receiving the rendezvous signal (an interrupt to
	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
	** contents of memory are valid."
	*/
	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
	mb();
	/*
	 * OK, wait a bit for that CPU to finish staggering about.
	 * Slave will set a bit when it reaches smp_cpu_init().
	 * Once the "monarch CPU" sees the bit change, it can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			goto alive;
		}
		udelay(100);
		barrier();
	}
	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
	smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
		cpuid, timeout * 100);
	return 0;
}
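
/*
 * Putting the pieces together, the monarch/slave handshake above works
 * roughly like this (a sketch of the flow visible in this file):
 *
 *	monarch				slave (held in PDC rendezvous)
 *	-------				------------------------------
 *	cpu_now_booting = cpuid;
 *	smp_init_current_idle_task = idle;
 *	gsc_writel(...)  ------------>	woken via MEM_RENDEZ/OS_RENDEZ
 *	poll cpu_online(cpuid)		smp_slave_stext -> smp_callin()
 *					  -> smp_cpu_init()
 *					  -> set_cpu_online(cpunum, true)
 *	sees bit set,			  -> cpu_startup_entry(...)
 *	cpu_now_booting = 0;
 */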

void __init smp_prepare_boot_cpu(void)
{
	pr_info("SMP: bootstrap CPU ID is 0\n");
}

/*
** inventory.c:do_inventory() hasn't yet been run and thus we
** don't 'discover' the additional CPUs until later.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(ipi_lock, cpu));

	init_cpu_present(cpumask_of(0));
}

void __init smp_cpus_done(unsigned int cpu_max)
{
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	if (cpu_online(cpu))
		return 0;

	if (num_online_cpus() < nr_cpu_ids &&
		num_online_cpus() < setup_max_cpus &&
		smp_boot_one_cpu(cpu, tidle))
		return -EIO;

	return cpu_online(cpu) ? 0 : -EIO;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu = smp_processor_id();

	remove_cpu_topology(cpu);

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/* Find a new timesync master */
	if (cpu == time_keeper_id) {
		time_keeper_id = cpumask_first(cpu_online_mask);
		pr_info("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
	}

	disable_percpu_irq(IPI_IRQ);

	irq_migrate_all_off_this_cpu();

	flush_cache_all_local();
	flush_tlb_all_local(NULL);

	/* disable all irqs, including timer irq */
	local_irq_disable();

	/* wait for next timer irq ... */
	mdelay(1000/HZ+100);

	/* ... and then clear all pending external irqs */
	set_eiem(0);
	mtctl(~0UL, CR_EIRR);
	mfctl(CR_EIRR);
	mtctl(0, CR_EIRR);
#endif
	return 0;
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	pdc_cpu_rendezvous_lock();
}

void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
	pr_info("CPU%u: is shutting down\n", cpu);

	/* set task's state to interruptible sleep */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout((IS_ENABLED(CONFIG_64BIT) ? 8 : 2) * HZ);

	pdc_cpu_rendezvous_unlock();
}
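
/*
 * Note on the teardown flow above (a sketch of what is visible in this
 * file, not of the generic hotplug core): __cpu_disable() runs on the
 * dying CPU and marks it offline; __cpu_die() runs on the CPU requesting
 * the shutdown and takes the PDC rendezvous lock; and
 * arch_cpuhp_cleanup_dead_cpu() then sleeps for a while, presumably to
 * give the dying CPU time to return to firmware rendezvous, before
 * releasing that lock.
 */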