/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
        IPI_TIMER = 2,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};
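
/*
 * Completion used to synchronise secondary CPU boot: __cpu_up() waits on
 * it (with a one second timeout) and secondary_start_kernel() completes
 * it once the new CPU has been marked online.
 */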
static DECLARE_COMPLETION(cpu_running);

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret;

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        secondary_data.pgdir = virt_to_phys(idmap_pgd);
        secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
        __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
        outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                wait_for_completion_timeout(&cpu_running,
                                            msecs_to_jiffies(1000));

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
        }

        secondary_data.stack = NULL;
        secondary_data.pgdir = 0;

        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        struct task_struct *p;
        int ret;

        ret = platform_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Stop the local timer for this CPU.
         */
        percpu_timer_stop();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
        local_flush_tlb_all();

        read_lock(&tasklist_lock);
        for_each_process(p) {
                if (p->mm)
                        cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
        }
        read_unlock(&tasklist_lock);

        return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it times out.
 */
void __cpu_die(unsigned int cpu)
{
        if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
                pr_err("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

        if (!platform_cpu_kill(cpu))
                printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_irq_disable();
        mb();

        /* Tell __cpu_die() that this CPU is now safe to dispose of */
        complete(&cpu_died);

        /*
         * actual CPU shutdown procedure is at least platform (if not
         * CPU) specific.
         */
        platform_cpu_die(cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation.  There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         */
        __asm__("mov    sp, %0\n"
        "       mov     fp, #0\n"
        "       b       secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;

        store_cpu_topology(cpuid);
}

static void percpu_timer_setup(void);

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        cpu_switch_mm(mm->pgd, mm);
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        printk("CPU%u: Booted secondary processor\n", cpu);

        cpu_init();
        preempt_disable();
        trace_hardirqs_off();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        platform_secondary_init(cpu);

        notify_cpu_starting(cpu);

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue - which happens after __cpu_up returns.
         */
        set_cpu_online(cpu, true);
        complete(&cpu_running);

        /*
         * Setup the percpu timer for this CPU.
         */
        percpu_timer_setup();

        local_irq_enable();
        local_fiq_enable();

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_idle();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int ncores = num_possible_cpus();

        init_cpu_topology();

        smp_store_cpu_info(smp_processor_id());

        /*
         * are we trying to boot more cores than exist?
         */
        if (max_cpus > ncores)
                max_cpus = ncores;
        if (ncores > 1 && max_cpus) {
                /*
                 * Enable the local timer or broadcast device for the
                 * boot CPU, but only if we have more than one CPU.
                 */
                percpu_timer_setup();

                /*
                 * Initialise the present map, which describes the set of CPUs
                 * actually populated at the present time. A platform should
                 * re-initialize the map in platform_smp_prepare_cpus() if
                 * present != possible (e.g. physical hotplug).
                 */
                init_cpu_present(cpu_possible_mask);

                /*
                 * Initialise the SCU if there is more than one CPU
                 * and let them know where to start.
                 */
                platform_smp_prepare_cpus(max_cpus);
        }
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
        smp_cross_call = fn;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
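
/*
 * Human-readable names for the IPI statistics printed by show_ipi_list().
 * The S() macro subtracts IPI_TIMER so the table is indexed from zero even
 * though the first IPI number is non-zero.
 */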
static const char *ipi_types[NR_IPI] = {
#define S(x,s)  [x - IPI_TIMER] = s
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

                for_each_present_cpu(cpu)
                        seq_printf(p, "%10u ",
                                   __get_irq_stat(cpu, ipi_irqs[i]));

                seq_printf(p, " %s\n", ipi_types[i]);
        }
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = 0;
        int i;

        for (i = 0; i < NR_IPI; i++)
                sum += __get_irq_stat(cpu, ipi_irqs[i]);

        return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
        struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
        evt->event_handler(evt);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
        smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast     NULL
#endif

static void broadcast_timer_set_mode(enum clock_event_mode mode,
        struct clock_event_device *evt)
{
}

static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
        evt->name       = "dummy_timer";
        evt->features   = CLOCK_EVT_FEAT_ONESHOT |
                          CLOCK_EVT_FEAT_PERIODIC |
                          CLOCK_EVT_FEAT_DUMMY;
        evt->rating     = 400;
        evt->mult       = 1;
        evt->set_mode   = broadcast_timer_set_mode;

        clockevents_register_device(evt);
}

static struct local_timer_ops *lt_ops;

#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
        if (!is_smp() || !setup_max_cpus)
                return -ENXIO;

        if (lt_ops)
                return -EBUSY;

        lt_ops = ops;
        return 0;
}
#endif

static void __cpuinit percpu_timer_setup(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

        evt->cpumask = cpumask_of(cpu);
        evt->broadcast = smp_timer_broadcast;

        if (!lt_ops || lt_ops->setup(evt))
                broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

        if (lt_ops)
                lt_ops->stop(evt);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        if (system_state == SYSTEM_BOOTING ||
            system_state == SYSTEM_RUNNING) {
                raw_spin_lock(&stop_lock);
                printk(KERN_CRIT "CPU%u: stopping\n", cpu);
                dump_stack();
                raw_spin_unlock(&stop_lock);
        }

        set_cpu_online(cpu, false);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
        handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct pt_regs *old_regs = set_irq_regs(regs);

        if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
                __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);

        switch (ipinr) {
        case IPI_TIMER:
                irq_enter();
                ipi_timer();
                irq_exit();
                break;

        case IPI_RESCHEDULE:
                scheduler_ipi();
                break;

        case IPI_CALL_FUNC:
                irq_enter();
                generic_smp_call_function_interrupt();
                irq_exit();
                break;

        case IPI_CALL_FUNC_SINGLE:
                irq_enter();
                generic_smp_call_function_single_interrupt();
                irq_exit();
                break;

        case IPI_CPU_STOP:
                irq_enter();
                ipi_cpu_stop(cpu);
                irq_exit();
                break;

        default:
                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                       cpu, ipinr);
                break;
        }
        set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
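
/*
 * smp_send_stop() asks every other online CPU to stop via IPI_CPU_STOP,
 * waits up to one second for them to go offline, and then (when CPU
 * hotplug is configured) lets the platform kill them off.
 */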
#ifdef CONFIG_HOTPLUG_CPU
static void smp_kill_cpus(cpumask_t *mask)
{
        unsigned int cpu;
        for_each_cpu(cpu, mask)
                platform_cpu_kill(cpu);
}
#else
static void smp_kill_cpus(cpumask_t *mask) { }
#endif

void smp_send_stop(void)
{
        unsigned long timeout;
        struct cpumask mask;

        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);
        smp_cross_call(&mask, IPI_CPU_STOP);

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_online_cpus() > 1 && timeout--)
                udelay(1);

        if (num_online_cpus() > 1)
                pr_warning("SMP: failed to stop secondary CPUs\n");

        smp_kill_cpus(&mask);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}