/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int __cpuinitdata pen_release = -1;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
};

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	if (smp_ops.smp_prepare_cpus)
		smp_ops.smp_prepare_cpus(max_cpus);
}

static void __cpuinit platform_secondary_init(unsigned int cpu)
{
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (smp_ops.smp_boot_secondary)
		return smp_ops.smp_boot_secondary(cpu, idle);
	return -ENOSYS;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static void platform_cpu_die(unsigned int cpu)
{
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpuinit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpuinit __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();
	mb();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	RCU_NONIDLE(complete(&cpu_died));

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

static void percpu_timer_setup(void);

/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

	printk("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in platform_smp_prepare_cpus() if
		 * present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there are more than one CPU
		 * and let them know where to start.
		 */
		platform_smp_prepare_cpus(max_cpus);
	}
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	evt->event_handler(evt);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast	NULL
#endif

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}

static struct local_timer_ops *lt_ops;

#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
	if (!is_smp() || !setup_max_cpus)
		return -ENXIO;

	if (lt_ops)
		return -EBUSY;

	lt_ops = ops;
	return 0;
}
#endif

static void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);
	evt->broadcast = smp_timer_broadcast;

	if (!lt_ops || lt_ops->setup(evt))
		broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	if (lt_ops)
		lt_ops->stop(evt);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr < NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

	case IPI_TIMER:
		irq_enter();
		ipi_timer();
		irq_exit();
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	default:
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_HOTPLUG_CPU
static void smp_kill_cpus(cpumask_t *mask)
{
	unsigned int cpu;
	for_each_cpu(cpu, mask)
		platform_cpu_kill(cpu);
}
#else
static void smp_kill_cpus(cpumask_t *mask) { }
#endif

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");

	smp_kill_cpus(&mask);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
					per_cpu(l_p_j_ref_freq, cpu),
					freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
		CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif