/* Sparc SS1000/SC2000 SMP support.
 *
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 *
 * Based on sun4m's smp.c, which is:
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/cpu.h>

#include <asm/cacheflush.h>
#include <asm/switch_to.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/oplib.h>
#include <asm/sbi.h>
#include <asm/mmu.h>

#include "kernel.h"
#include "irq.h"

#define IRQ_CROSS_CALL		15

static volatile int smp_processors_ready;
static int smp_highest_cpu;
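
/*
 * Atomically exchange val with *ptr using the SPARC "swap" instruction,
 * returning the previous value.  Used below to publish a CPU's arrival in
 * cpu_callin_map with a single atomic store.
 */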
static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned long val)
{
        __asm__ __volatile__("swap [%1], %0\n\t" :
                             "=&r" (val), "=&r" (ptr) :
                             "0" (val), "1" (ptr));
        return val;
}

static void smp4d_ipi_init(void);
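
/*
 * Per-CPU front-panel LED values.  Each board LED register apparently holds
 * an even/odd CPU pair, so show_leds() rounds the cpuid down to the even
 * member and writes both CPUs' nibbles with a single ASI_M_CTL store.
 */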
static unsigned char cpu_leds[32];

static inline void show_leds(int cpuid)
{
        cpuid &= 0x1e;
        __asm__ __volatile__("stba %0, [%1] %2" : :
                             "r" ((cpu_leds[cpuid] << 4) | cpu_leds[cpuid+1]),
                             "r" (ECSR_BASE(cpuid) | BB_LEDS),
                             "i" (ASI_M_CTL));
}

void __cpuinit sun4d_cpu_pre_starting(void *arg)
{
        int cpuid = hard_smp_processor_id();

        /* Show we are alive */
        cpu_leds[cpuid] = 0x6;
        show_leds(cpuid);

        /* Enable level15 interrupt, disable level14 interrupt for now */
        cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);
}

void __cpuinit sun4d_cpu_pre_online(void *arg)
{
        unsigned long flags;
        int cpuid;

        cpuid = hard_smp_processor_id();

        /* Unblock the master CPU _only_ when the scheduler state
         * of all secondary CPUs will be up-to-date, so after
         * the SMP initialization the master will be just allowed
         * to call the scheduler code.
         */
        sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1);
        local_ops->cache_all();
        local_ops->tlb_all();

        while ((unsigned long)current_set[cpuid] < PAGE_OFFSET)
                barrier();

        while (current_set[cpuid]->cpu != cpuid)
                barrier();

        /* Fix idle thread fields. */
        __asm__ __volatile__("ld [%0], %%g6\n\t"
                             : : "r" (&current_set[cpuid])
                             : "memory" /* paranoid */);

        cpu_leds[cpuid] = 0x9;
        show_leds(cpuid);

        /* Attach to the address space of init_task. */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        local_ops->cache_all();
        local_ops->tlb_all();

        while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
                barrier();

        spin_lock_irqsave(&sun4d_imsk_lock, flags);
        cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */
        spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
}

/*
 * Cycle through the processors asking the PROM to start each one.
 */
void __init smp4d_boot_cpus(void)
{
        smp4d_ipi_init();
        if (boot_cpu_id)
                current_set[0] = NULL;
        local_ops->cache_all();
}

int __cpuinit smp4d_boot_one_cpu(int i, struct task_struct *idle)
{
        unsigned long *entry = &sun4d_cpu_startup;
        int timeout;
        int cpu_node;

        cpu_find_by_instance(i, &cpu_node, NULL);
        current_set[i] = task_thread_info(idle);

        /*
         * Initialize the contexts table
         * Since the call to prom_startcpu() trashes the structure,
         * we need to re-initialize it for each cpu
         */
        smp_penguin_ctable.which_io = 0;
        smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
        smp_penguin_ctable.reg_size = 0;

        /* whirrr, whirrr, whirrrrrrrrr... */
        printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
        local_ops->cache_all();
        prom_startcpu(cpu_node,
                      &smp_penguin_ctable, 0, (char *)entry);

        printk(KERN_INFO "prom_startcpu returned :)\n");

        /* wheee... it's going... */
        for (timeout = 0; timeout < 10000; timeout++) {
                if (cpu_callin_map[i])
                        break;
                udelay(200);
        }
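
        /*
         * The loop above gives the new CPU up to roughly two seconds
         * (10000 polls of 200 us) to set its cpu_callin_map slot before
         * we declare it stuck.
         */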
        if (!(cpu_callin_map[i])) {
                printk(KERN_ERR "Processor %d is stuck.\n", i);
                return -ENODEV;
        }

        local_ops->cache_all();
        return 0;
}

void __init smp4d_smp_done(void)
{
        int i, first;
        int *prev;

        /* setup cpu list for irq rotation */
        first = 0;
        prev = &first;
        for_each_online_cpu(i) {
                *prev = i;
                prev = &cpu_data(i).next;
        }
        *prev = first;
        local_ops->cache_all();

        /* Ok, they are spinning and ready to go. */
        smp_processors_ready = 1;
        sun4d_distribute_irqs();
}

/* Memory structure giving interrupt handler information about IPI generated */
struct sun4d_ipi_work {
        int single;
        int msk;
        int resched;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sun4d_ipi_work, sun4d_ipi_work);

/* Initialize IPIs on the SUN4D SMP machine */
static void __init smp4d_ipi_init(void)
{
        int cpu;
        struct sun4d_ipi_work *work;

        printk(KERN_INFO "smp4d: setup IPI at IRQ %d\n", SUN4D_IPI_IRQ);

        for_each_possible_cpu(cpu) {
                work = &per_cpu(sun4d_ipi_work, cpu);
                work->single = work->msk = work->resched = 0;
        }
}

void sun4d_ipi_interrupt(void)
{
        struct sun4d_ipi_work *work = &__get_cpu_var(sun4d_ipi_work);

        if (work->single) {
                work->single = 0;
                smp_call_function_single_interrupt();
        }
        if (work->msk) {
                work->msk = 0;
                smp_call_function_interrupt();
        }
        if (work->resched) {
                work->resched = 0;
                smp_resched_interrupt();
        }
}
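
/*
 * The senders further down (sun4d_ipi_single/mask_one/resched) set one of
 * the per-cpu flags above and then raise SUN4D_IPI_IRQ on the target CPU;
 * the handler above clears the flag and dispatches to the generic SMP code.
 */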

/* +-------+-------------+-----------+------------------------------------+
 * | bcast |    devid    |    sid    |             levels mask            |
 * +-------+-------------+-----------+------------------------------------+
 *  31      30         23 22       15 14                                 0
 */
#define IGEN_MESSAGE(bcast, devid, sid, levels) \
        (((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels))

static void sun4d_send_ipi(int cpu, int level)
{
        cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1)));
}
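
/*
 * For example, a cross call to CPU n uses level IRQ_CROSS_CALL (15), which
 * encodes as devid = n << 3, sid = 6 + ((15 >> 1) & 7) = 13 and a levels
 * mask of 1 << 14 = 0x4000 (bit 14, presumably selecting processor
 * interrupt level 15 on the target).
 */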

static void sun4d_ipi_single(int cpu)
{
        struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);

        /* Mark work */
        work->single = 1;

        /* Generate IRQ on the CPU */
        sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
}

static void sun4d_ipi_mask_one(int cpu)
{
        struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);

        /* Mark work */
        work->msk = 1;

        /* Generate IRQ on the CPU */
        sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
}

static void sun4d_ipi_resched(int cpu)
{
        struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);

        /* Mark work */
        work->resched = 1;

        /* Generate IRQ on the CPU (any IRQ will cause resched) */
        sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
}

static struct smp_funcall {
        smpfunc_t func;
        unsigned long arg1;
        unsigned long arg2;
        unsigned long arg3;
        unsigned long arg4;
        unsigned long arg5;
        unsigned char processors_in[NR_CPUS];  /* Set when ipi entered. */
        unsigned char processors_out[NR_CPUS]; /* Set when ipi exited. */
} ccall_info __attribute__((aligned(8)));

static DEFINE_SPINLOCK(cross_call_lock);

/* Cross calls must be serialized, at least currently. */
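/*
 * The cross call is a two-phase handshake: the caller fires IRQ_CROSS_CALL
 * at every CPU in the mask, then spins first until each target has set its
 * processors_in slot (entered smp4d_cross_call_irq) and again until it has
 * set processors_out (finished running func).
 */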
static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
                             unsigned long arg2, unsigned long arg3,
                             unsigned long arg4)
{
        if (smp_processors_ready) {
                register int high = smp_highest_cpu;
                unsigned long flags;

                spin_lock_irqsave(&cross_call_lock, flags);

                {
                        /*
                         * If you make changes here, make sure
                         * gcc generates proper code...
                         */
                        register smpfunc_t f asm("i0") = func;
                        register unsigned long a1 asm("i1") = arg1;
                        register unsigned long a2 asm("i2") = arg2;
                        register unsigned long a3 asm("i3") = arg3;
                        register unsigned long a4 asm("i4") = arg4;
                        register unsigned long a5 asm("i5") = 0;

                        __asm__ __volatile__(
                                "std %0, [%6]\n\t"
                                "std %2, [%6 + 8]\n\t"
                                "std %4, [%6 + 16]\n\t" : :
                                "r" (f), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5),
                                "r" (&ccall_info.func));
                }

                /* Init receive/complete mapping, plus fire the IPI's off. */
                {
                        register int i;

                        cpumask_clear_cpu(smp_processor_id(), &mask);
                        cpumask_and(&mask, cpu_online_mask, &mask);
                        for (i = 0; i <= high; i++) {
                                if (cpumask_test_cpu(i, &mask)) {
                                        ccall_info.processors_in[i] = 0;
                                        ccall_info.processors_out[i] = 0;
                                        sun4d_send_ipi(i, IRQ_CROSS_CALL);
                                }
                        }
                }

                {
                        register int i;

                        i = 0;
                        do {
                                if (!cpumask_test_cpu(i, &mask))
                                        continue;
                                while (!ccall_info.processors_in[i])
                                        barrier();
                        } while (++i <= high);

                        i = 0;
                        do {
                                if (!cpumask_test_cpu(i, &mask))
                                        continue;
                                while (!ccall_info.processors_out[i])
                                        barrier();
                        } while (++i <= high);
                }

                spin_unlock_irqrestore(&cross_call_lock, flags);
        }
}

/* Running cross calls. */
void smp4d_cross_call_irq(void)
{
        int i = hard_smp_processor_id();

        ccall_info.processors_in[i] = 1;
        ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
                        ccall_info.arg4, ccall_info.arg5);
        ccall_info.processors_out[i] = 1;
}

void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        int cpu = hard_smp_processor_id();
        struct clock_event_device *ce;
        static int cpu_tick[NR_CPUS];
        static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd };

        old_regs = set_irq_regs(regs);
        bw_get_prof_limit(cpu);
        bw_clear_intr_mask(0, 1);       /* INTR_TABLE[0] & 1 is Profile IRQ */

        cpu_tick[cpu]++;
        if (!(cpu_tick[cpu] & 15)) {
                if (cpu_tick[cpu] == 0x60)
                        cpu_tick[cpu] = 0;
                cpu_leds[cpu] = led_mask[cpu_tick[cpu] >> 4];
                show_leds(cpu);
        }

        ce = &per_cpu(sparc32_clockevent, cpu);

        irq_enter();
        ce->event_handler(ce);
        irq_exit();

        set_irq_regs(old_regs);
}

static const struct sparc32_ipi_ops sun4d_ipi_ops = {
        .cross_call = sun4d_cross_call,
        .resched    = sun4d_ipi_resched,
        .single     = sun4d_ipi_single,
        .mask_one   = sun4d_ipi_mask_one,
};

void __init sun4d_init_smp(void)
{
        int i;

        /* Patch ipi15 trap table */
        t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);

        sparc32_ipi_ops = &sun4d_ipi_ops;

        for (i = 0; i < NR_CPUS; i++) {
                ccall_info.processors_in[i] = 1;
                ccall_info.processors_out[i] = 1;
        }
}