/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
** -grant (1/12/2001)
**
**	This program is free software; you can redistribute it and/or modify
**	it under the terms of the GNU General Public License as published by
**	the Free Software Foundation; either version 2 of the License, or
**	(at your option) any later version.
*/
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/ftrace.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/tlbflush.h>

#include <asm/io.h>
#include <asm/irq.h>		/* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#undef DEBUG_SMP
#ifdef DEBUG_SMP
static int smp_debug_lvl = 0;
#define smp_debug(lvl, printargs...)		\
		if (lvl >= smp_debug_lvl)	\
			printk(printargs);
#else
#define smp_debug(lvl, ...)	do { } while(0)
#endif /* DEBUG_SMP */
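
/*
 * Usage sketch: smp_debug() compiles away unless DEBUG_SMP is defined
 * above. When it is, a message prints only if its level is at least
 * smp_debug_lvl, e.g. smp_debug(100, KERN_DEBUG "CPU%d ...\n", cpu).
 */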

volatile struct task_struct *smp_init_current_idle_task;

/* track which CPU is booting */
static volatile int cpu_now_booting __cpuinitdata;

static int parisc_max_cpus __cpuinitdata = 1;

static DEFINE_PER_CPU(spinlock_t, ipi_lock);

enum ipi_message_type {
	IPI_NOP = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_START,
	IPI_CPU_STOP,
	IPI_CPU_TEST
};
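
/*
 * Each ipi_message_type doubles as a bit position in the target CPU's
 * cpuinfo_parisc.pending_ipi word: ipi_send() sets (1 << op) under the
 * per-CPU ipi_lock, and ipi_interrupt() drains the whole word, so
 * several distinct ops can be coalesced into one interrupt.
 */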

/********** SMP inter processor interrupt and communication routines */

#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
**    *May* need this "hook" to register IPI handler
**    once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{
#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region

	if (cpu_online(cpuid))
	{
		switch_to_idle_task(current);
	}

	return;
}
#endif

/*
** Yoink this CPU from the runnable list...
**
*/
static void
halt_processor(void)
{
	/* REVISIT : redirect I/O Interrupts to another CPU? */
	/* REVISIT : does PM *know* this CPU isn't available? */
	set_cpu_online(smp_processor_id(), false);
	local_irq_disable();
	for (;;)
		;
}
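
/*
 * Per-CPU IPI handler. Repeatedly snapshots and clears this CPU's
 * pending_ipi word under ipi_lock, then dispatches each bit that was
 * set; the outer loop catches ops posted while we were dispatching.
 */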
irqreturn_t __irq_entry
ipi_interrupt(int irq, void *dev_id)
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
	unsigned long ops;
	unsigned long flags;

	/* Count this now; we may make a call that never returns. */
	p->ipi_count++;

	mb();	/* Order interrupt and bit testing. */

	for (;;) {
		spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
		spin_lock_irqsave(lock, flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(lock, flags);

		mb(); /* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
			unsigned long which = ffz(~ops);

			ops &= ~(1 << which);

			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
				break;

			case IPI_RESCHEDULE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
				scheduler_ipi();
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_START:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
				halt_processor();
				break;

			case IPI_CPU_TEST:
				smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
				break;

			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				return IRQ_NONE;
			} /* Switch */

			/* let in any pending interrupts */
			local_irq_enable();
			local_irq_disable();
		} /* while (ops) */
	}
	return IRQ_HANDLED;
}
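
/*
 * Post one op to @cpu: set its bit in the target's pending_ipi word,
 * then write the IPI vector (the IRQ's offset within the CPU IRQ
 * region) to the target's HPA to raise the external interrupt.
 */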
static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	p->pending_ipi |= 1 << op;
	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
	spin_unlock_irqrestore(lock, flags);
}

static void
send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	for_each_cpu(cpu, mask)
		ipi_send(cpu, op);
}

static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	BUG_ON(dest_cpu == NO_PROC_ID);

	ipi_send(dest_cpu, op);
}

static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}


inline void
smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }

static inline void
smp_send_start(void)	{ send_IPI_allbutself(IPI_CPU_START); }

void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
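
/*
 * A NOP IPI carries no payload; as far as this file is concerned its
 * only effect is to make every other online CPU take ipi_interrupt()
 * and drain its pending_ipi word.
 */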
void
smp_send_all_nop(void)
{
	send_IPI_allbutself(IPI_NOP);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
}
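
/*
 * The two hooks above are the architecture half of the generic
 * smp_call_function*() machinery in kernel/smp.c: the core queues the
 * call data and invokes these to raise the IPI, and the target CPU
 * then runs generic_smp_call_function_interrupt() (or the _single
 * variant) from ipi_interrupt().
 */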

/*
 * Flush all other CPU's tlb and then mine.  Do this with on_each_cpu()
 * as we want to ensure all TLB's flushed before proceeding.
 */
void
smp_flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_local, NULL, 1);
}

/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void __init
smp_cpu_init(int cpunum)
{
	extern int init_per_cpu(int);  /* arch/parisc/kernel/processor.c */
	extern void init_IRQ(void);    /* arch/parisc/kernel/irq.c */
	extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */

	/* Set modes and Enable floating point coprocessor */
	(void) init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Well, support 2.4 linux scheme as well. */
	if (cpu_isset(cpunum, cpu_online_map))
	{
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}
	set_cpu_online(cpunum, true);

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	init_IRQ();   /* make sure no IRQs are enabled or pending */
	start_cpu_itimer();
}

/*
 * Slaves start using C here. Indirectly called from smp_slave_stext.
 * Do what start_kernel() and main() do for boot strap processor (aka monarch)
 */
void __init smp_callin(void)
{
	int slave_id = cpu_now_booting;

	smp_cpu_init(slave_id);
	preempt_disable();

	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	local_irq_enable();  /* Interrupts have been off until now */

	cpu_idle();      /* Wait for timer to schedule some work */

	/* NOTREACHED */
	panic("smp_callin() AAAAaaaaahhhh....\n");
}

/*
 * Bring one cpu online.
 */
int __cpuinit smp_boot_one_cpu(int cpuid)
{
	const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
	struct task_struct *idle;
	long timeout;

	/*
	 * Create an idle task for this CPU.  Note the address we'd give
	 * to kernel_thread is irrelevant -- it's going to start
	 * where OS_BOOT_RENDEVZ vector in SAL says to start.  But
	 * this gets all the other task-y sort of data structures set
	 * up like we wish.   We need to pull the just created idle task
	 * off the run queue and stuff it into the init_tasks[] array.
	 * Sheesh . . .
	 */

	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	task_thread_info(idle)->cpu = cpuid;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[], cpu_data[])
	*/
	cpu_now_booting = cpuid;

	/*
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);

	/*
	** This gets PDC to release the CPU from a very tight loop.
	**
	** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
	** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
	** is executed after receiving the rendezvous signal (an interrupt to
	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
	** contents of memory are valid."
	*/
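	/*
	 * The write below hits the slave CPU's HPA, i.e. the address its
	 * external interrupt register responds to, which should deliver
	 * the EIR{0} rendezvous signal described above.
	 */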
	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
	mb();

	/*
	 * OK, wait a bit for that CPU to finish staggering about.
	 * Slave will set a bit when it reaches smp_cpu_init().
	 * Once the "monarch CPU" sees the bit change, it can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			smp_init_current_idle_task = NULL;
			goto alive;
		}
		udelay(100);
		barrier();
	}

	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
	smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
		cpuid, timeout * 100);
	return 0;
}

void __init smp_prepare_boot_cpu(void)
{
	int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;

	/* Setup BSP mappings */
	printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);

	set_cpu_online(bootstrap_processor, true);
	set_cpu_present(bootstrap_processor, true);
}

/*
** inventory.c:do_inventory() hasn't yet been run and thus we
** don't 'discover' the additional CPUs until later.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(ipi_lock, cpu));

	init_cpu_present(cpumask_of(0));

	parisc_max_cpus = max_cpus;
	if (!max_cpus)
		printk(KERN_INFO "SMP mode deactivated.\n");
}

void smp_cpus_done(unsigned int cpu_max)
{
	return;
}


int __cpuinit __cpu_up(unsigned int cpu)
{
	if (cpu != 0 && cpu < parisc_max_cpus)
		smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

#ifdef CONFIG_PROC_FS
int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
#endif