/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2007 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);

static atomic_t cpus_booted = ATOMIC_INIT(0);

/*
 * Run specified function on a particular processor.
 */
void __smp_call_function(unsigned int cpu);
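
/*
 * Record this CPU's delay-loop calibration; smp_cpus_done() sums the
 * per-cpu loops_per_jiffy values to report total BogoMIPS.
 */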
static inline void __init smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	c->loops_per_jiffy = loops_per_jiffy;
}
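
/*
 * Early SMP bring-up on the boot CPU: grab an MMU context for init_mm
 * and let the platform code prepare the secondary CPUs.
 */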
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;

	plat_prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif
}
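
/*
 * Register the boot CPU in the CPU maps and mark it online and possible.
 */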
void __devinit smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	cpu_set(cpu, cpu_online_map);
	cpu_set(cpu, cpu_possible_map);
}
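
/*
 * Entry point for secondary CPUs: adopt init_mm, set up per-cpu traps,
 * calibrate the delay loop, mark the CPU online, then idle.
 */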
asmlinkage void __cpuinit start_secondary(void)
{
	unsigned int cpu;
	struct mm_struct *mm = &init_mm;

	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(mm, current);

	per_cpu_trap_init();

	preempt_disable();
	local_irq_enable();

	calibrate_delay();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);

	cpu_set(cpu, cpu_online_map);

	cpu_idle();
}
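
/*
 * Boot parameter block shared with the secondary startup code in head.S;
 * __cpu_up() fills it in before releasing the CPU.
 */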
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;
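
/*
 * Bring up one secondary CPU: fork its idle task, hand the boot
 * parameters to head.S, kick the CPU via the platform code and wait
 * up to one second for it to come online.
 */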
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *tsk;
	unsigned long timeout;

	tsk = fork_idle(cpu);
	if (IS_ERR(tsk)) {
		printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);
		return PTR_ERR(tsk);
	}

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_cache_all();

	plat_start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}
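
/*
 * All CPUs are up; report the aggregate BogoMIPS figure.
 */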
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000 / HZ),
	       (bogosum / (5000 / HZ)) % 100);
}
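
/* Poke the target CPU with a reschedule IPI. */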
void smp_send_reschedule(int cpu)
{
	plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
}
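
/*
 * IPI handler for smp_send_stop(): take this CPU offline and spin
 * with interrupts disabled.
 */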
static void stop_this_cpu(void *unused)
{
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();

	for (;;)
		cpu_relax();
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 1, 0);
}
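
/*
 * Shared call descriptor for smp_call_function(); the lock serializes
 * concurrent callers, and .finished counts the CPUs that have run the
 * requested function.
 */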
struct smp_fn_call_struct smp_fn_call = {
	.lock		= __SPIN_LOCK_UNLOCKED(smp_fn_call.lock),
	.finished	= ATOMIC_INIT(0),
};

/*
 * The caller of this wants the passed function to run on every cpu.  If wait
 * is set, wait until all cpus have finished the function before returning.
 * The lock is here to protect the call structure.
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info, int retry, int wait)
{
	unsigned int nr_cpus = atomic_read(&cpus_booted);
	int i;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	spin_lock(&smp_fn_call.lock);

	atomic_set(&smp_fn_call.finished, 0);
	smp_fn_call.fn = func;
	smp_fn_call.data = info;

	for (i = 0; i < nr_cpus; i++)
		if (i != smp_processor_id())
			plat_send_ipi(i, SMP_MSG_FUNCTION);

	if (wait)
		while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1));

	spin_unlock(&smp_fn_call.lock);

	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
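
/*
 * flush_tlb_all() broadcasts local_flush_tlb_all() to every online CPU.
 */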
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}
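
/* Argument block handed to the ranged TLB-flush IPI handlers. */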
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;
	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1);
	local_flush_tlb_one(asid, vaddr);
}