/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int pen_release = -1;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
};

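/*
 * Pack a page-table base for secondary_data: convert the kernel virtual
 * pgd pointer to its idmap physical address and shift it so it fits in an
 * unsigned long even when LPAE physical addresses exceed 32 bits.
 */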
static unsigned long get_arch_pgd(pgd_t *pgd)
{
	phys_addr_t pgdir = virt_to_idmap(pgd);

	BUG_ON(pgdir & ARCH_PGD_MASK);

	return pgdir >> ARCH_PGD_SHIFT;
}

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	if (!smp_ops.smp_boot_secondary)
		return -ENOSYS;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = get_arch_pgd(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	sync_cache_w(&secondary_data);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = smp_ops.smp_boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
						 msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

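/*
 * A platform that fills in the cpu_kill hook is assumed to be able to take
 * CPUs down, so hotplug capability is reported from that.
 */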
int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	complete(&cpu_died);

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

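/* Set up the boot CPU's per-cpu offset so this_cpu accesses work early. */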
void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platform's smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there are more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

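/*
 * The platform's IPI trigger is registered once at boot via
 * set_smp_cross_call(); smp_cross_call() wraps it with tracing.
 */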
static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!__smp_cross_call)
		__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_COMPLETION, "completion interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

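/* Dump the per-CPU IPI counters; presumably shown via /proc/interrupts. */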
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

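/* Arch hooks called by the generic SMP code to raise the various IPIs. */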
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

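/*
 * Per-CPU completion IPI: a caller registers a completion for a target CPU
 * and then sends IPI_COMPLETION to it; the handler completes it on arrival.
 */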
static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

	case IPI_COMPLETION:
		irq_enter();
		ipi_complete(cpu);
		irq_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
			cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

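/* Stop all other CPUs via IPI_CPU_STOP, waiting up to a second for them. */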
void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

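/*
 * When cpufreq changes the clock rate, rescale the global and per-CPU
 * loops_per_jiffy values from the reference values captured at the first
 * transition, so delay loops stay roughly calibrated.
 */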
static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
					per_cpu(l_p_j_ref_freq, cpu),
					freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
		CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif