/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

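/*
 * Install the platform's SMP operations; a second registration simply
 * overrides the first (with a warning).
 */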
void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

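/*
 * Clone the boot CPU's cpuinfo into this CPU's slot, then record this
 * CPU's own loops_per_jiffy.
 */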
static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

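/*
 * Early boot-CPU setup: grab an MMU context for init_mm, record which
 * CPU we are running on, and let the platform ops prepare the
 * secondaries.
 */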
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;

	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

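/*
 * Record the boot CPU in the physical/logical maps and flag it
 * online, possible, and CPU_ONLINE.
 */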
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
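/*
 * Poll for up to roughly a second for the dying CPU to reach
 * CPU_DEAD, and complain if it never gets there.
 */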
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__get_cpu_var(cpu_state) = CPU_DEAD;
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
}

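/*
 * Tear the calling CPU down: let the platform ops veto first, then
 * mark the CPU offline, migrate its IRQs away, stop its local timer,
 * flush caches/TLB, and drop it from every task's mm cpumask.
 */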
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif

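/*
 * C entry point for a secondary CPU: borrow init_mm, initialize traps
 * and the local timer, calibrate delay and record cpuinfo, mark the
 * CPU online, and finally drop into the idle loop.
 */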
asmlinkage void __cpuinit start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	/* Enable local timers */
	local_timer_setup(cpu);
	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_idle();
}

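/*
 * Boot parameter block for secondary CPUs, declared extern here and
 * consumed by the secondary entry path (see the head.S comment in
 * __cpu_up() below).
 */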
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

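/*
 * Bring one secondary CPU up: fill in stack_start for the secondary
 * entry code, flush it out so the new CPU sees consistent data, kick
 * the CPU through the platform ops, then poll for up to a second for
 * it to come online.
 */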
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
	unsigned long timeout;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

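/*
 * Hooks used by the generic smp_call_function code: each one simply
 * asks the platform ops to raise the corresponding IPI.
 */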
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

void smp_timer_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}

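/*
 * Dispatch a received IPI message to the matching generic handler;
 * unknown message types are simply logged and ignored.
 */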
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		scheduler_ipi();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

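/*
 * Cross-CPU TLB maintenance: each *_ipi helper below runs the local
 * flush on a remote CPU via smp_call_function()/on_each_cpu().
 */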
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;
	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}