// SPDX-License-Identifier: GPL-2.0-or-later
/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
** -grant (1/12/2001)
**
*/
#include <linux/types.h>
#include <linux/spinlock.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>

#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/tlbflush.h>

#include <asm/io.h>
#include <asm/irq.h>		/* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>

#undef DEBUG_SMP
#ifdef DEBUG_SMP
static int smp_debug_lvl = 0;
#define smp_debug(lvl, printargs...)		\
		if (lvl >= smp_debug_lvl)	\
			printk(printargs);
#else
#define smp_debug(lvl, ...)	do { } while(0)
#endif /* DEBUG_SMP */
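
/*
** Example: with DEBUG_SMP defined, smp_debug(100, KERN_DEBUG "...\n", cpu)
** prints whenever 100 >= smp_debug_lvl -- always, given the default level
** of 0. Raise smp_debug_lvl above 100 to silence the per-IPI messages below.
*/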

volatile struct task_struct *smp_init_current_idle_task;

/* track which CPU is booting */
static volatile int cpu_now_booting;

static int parisc_max_cpus = 1;

static DEFINE_PER_CPU(spinlock_t, ipi_lock);

enum ipi_message_type {
	IPI_NOP = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_START,
	IPI_CPU_STOP,
	IPI_CPU_TEST
};
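
/*
** Each CPU advertises pending IPIs as bits in cpu_data[cpu].pending_ipi,
** indexed by the enum above: a sender takes the target's ipi_lock, sets
** bit (1 << op) and pokes the target's HPA (see ipi_send() below); the
** target then drains every set bit in ipi_interrupt().
*/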

/********** SMP inter processor interrupt and communication routines */

#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
**    *May* need this "hook" to register IPI handler
**    once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{
#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region
	if (cpu_online(cpuid)) {
		switch_to_idle_task(current);
	}

	return;
}
#endif

/*
** Yoink this CPU from the runnable list...
**
*/
static void
halt_processor(void)
{
	/* REVISIT : redirect I/O Interrupts to another CPU? */
	/* REVISIT : does PM *know* this CPU isn't available? */
	set_cpu_online(smp_processor_id(), false);
	local_irq_disable();
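	/*
	** Hand this CPU back to firmware: PDC parks it in a tight loop
	** until it is released again via MEM_RENDEZ (see the boot path
	** in smp_boot_one_cpu() below). The for(;;) is a fallback in
	** case the rendezvous call ever returns.
	*/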
	__pdc_cpu_rendezvous();
	for (;;)
		;
}

irqreturn_t __irq_entry
ipi_interrupt(int irq, void *dev_id)
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
	unsigned long ops;
	unsigned long flags;

	for (;;) {
		spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
		spin_lock_irqsave(lock, flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(lock, flags);

		mb(); /* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
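			/*
			** ffz(~ops) is the index of the lowest set bit in
			** ops, i.e. the lowest-numbered pending IPI: e.g.
			** with IPI_RESCHEDULE and IPI_CALL_FUNC pending,
			** ops == 0b110 and ffz(~ops) == 1 == IPI_RESCHEDULE.
			*/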
			unsigned long which = ffz(~ops);

			ops &= ~(1 << which);

			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
				break;

			case IPI_RESCHEDULE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
				inc_irq_stat(irq_resched_count);
				scheduler_ipi();
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
				inc_irq_stat(irq_call_count);
				generic_smp_call_function_interrupt();
				break;

			case IPI_CPU_START:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
				halt_processor();
				break;

			case IPI_CPU_TEST:
				smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
				break;

			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				return IRQ_NONE;
			} /* Switch */

			/* before doing more, let in any pending interrupts */
			if (ops) {
				local_irq_enable();
				local_irq_disable();
			}
		} /* while (ops) */
	}
	return IRQ_HANDLED;
}
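
/*
** Post one IPI to a single CPU: set the operation's bit in the target's
** pending mask, then write the EIR bit number (IPI_IRQ - CPU_IRQ_BASE)
** to the target processor's HPA, which raises that bit in the target's
** EIR and causes ipi_interrupt() to run there.
*/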
static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	p->pending_ipi |= 1 << op;
	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
	spin_unlock_irqrestore(lock, flags);
}

static void
send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	for_each_cpu(cpu, mask)
		ipi_send(cpu, op);
}

static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	BUG_ON(dest_cpu == NO_PROC_ID);

	ipi_send(dest_cpu, op);
}

static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

inline void
smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }

void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }

void
smp_send_all_nop(void)
{
	send_IPI_allbutself(IPI_NOP);
}
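
/*
** NOTE: IPI_NOP does no work in ipi_interrupt(); smp_send_all_nop() is
** useful when the caller only needs every other CPU to take an interrupt,
** e.g. to serialize against code running with interrupts disabled.
*/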

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC);
}

/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void __init
smp_cpu_init(int cpunum)
{
	extern void init_IRQ(void);		/* arch/parisc/kernel/irq.c */
	extern void start_cpu_itimer(void);	/* arch/parisc/kernel/time.c */

	/* Set modes and Enable floating point coprocessor */
	init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Well, support 2.4 linux scheme as well. */
	if (cpu_online(cpunum)) {
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}

	notify_cpu_starting(cpunum);
	set_cpu_online(cpunum, true);

	/* Initialise the idle task for this CPU */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
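	/*
	** The idle task has no user address space (current->mm is NULL,
	** as asserted above), so run it in lazy-TLB mode on the init_mm
	** reference grabbed above.
	*/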
	enter_lazy_tlb(&init_mm, current);

	init_IRQ();   /* make sure no IRQs are enabled or pending */
	start_cpu_itimer();
}

/*
 * Slaves start using C here. Indirectly called from smp_slave_stext.
 * Do what start_kernel() and main() do for boot strap processor (aka monarch)
 */
void __init smp_callin(unsigned long pdce_proc)
{
	int slave_id = cpu_now_booting;
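	/*
	** Sanity check: the PDC entry point the slave was handed on the
	** boot path must match the one PAGE0 advertises.
	*/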
#ifdef CONFIG_64BIT
	WARN_ON(((unsigned long)(PAGE0->mem_pdc_hi) << 32
			| PAGE0->mem_pdc) != pdce_proc);
#endif

	smp_cpu_init(slave_id);
	preempt_disable();

	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	local_irq_enable();  /* Interrupts have been off until now */

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	/* NOTREACHED */
	panic("smp_callin() AAAAaaaaahhhh....\n");
}

/*
 * Bring one cpu online.
 */
int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
	long timeout;

	task_thread_info(idle)->cpu = cpuid;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[], cpu_data[])
	*/
	cpu_now_booting = cpuid;

	/*
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);

	/*
	** This gets PDC to release the CPU from a very tight loop.
	**
	** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
	** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
	** is executed after receiving the rendezvous signal (an interrupt to
	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
	** contents of memory are valid."
	*/
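	/*
	** TIMER_IRQ - CPU_IRQ_BASE evaluates to 0, so this write raises
	** EIR{0} on the halted CPU: exactly the rendezvous signal the
	** firmware spec quoted above is waiting for.
	*/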
	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
	mb();

	/*
	 * OK, wait a bit for that CPU to finish staggering about.
	 * Slave will set a bit when it reaches smp_cpu_init().
	 * Once the "monarch CPU" sees the bit change, it can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			smp_init_current_idle_task = NULL;
			goto alive;
		}
		udelay(100);
		barrier();
	}
	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
	smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
		cpuid, timeout * 100);
	return 0;
}

void __init smp_prepare_boot_cpu(void)
{
	int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;

	/* Setup BSP mappings */
	printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);

	set_cpu_online(bootstrap_processor, true);
	set_cpu_present(bootstrap_processor, true);
}

/*
** inventory.c:do_inventory() hasn't yet been run and thus we
** don't 'discover' the additional CPUs until later.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(ipi_lock, cpu));

	init_cpu_present(cpumask_of(0));

	parisc_max_cpus = max_cpus;
	if (!max_cpus)
		printk(KERN_INFO "SMP mode deactivated.\n");
}

void smp_cpus_done(unsigned int cpu_max)
{
	return;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	if (cpu != 0 && cpu < parisc_max_cpus && smp_boot_one_cpu(cpu, tidle))
		return -ENOSYS;

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

#ifdef CONFIG_PROC_FS
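/*
** Writes to /proc/profile land here via kernel/profile.c; parisc does
** not support changing the profiling timer rate, so reject the request.
*/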
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
#endif