/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>

/*
 * As from 2.5, kernels no longer have an init_tasks structure, so we need
 * some other way of telling a new secondary core where to place its SVC
 * stack.
 */
struct secondary_data secondary_data;
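
/*
 * Value polled by secondaries waiting in the holding pen.  INVALID_HWID
 * means "stay put"; boot_secondary() writes the target CPU's MPIDR here,
 * and the released secondary writes INVALID_HWID back once it is out.
 */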
volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
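
/*
 * IPI message types passed to smp_cross_call() and decoded in handle_IPI().
 */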
enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};
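
/*
 * boot_lock serialises the pen-release handshake between boot_secondary()
 * on the boot CPU and secondary_start_kernel() on the incoming CPU.
 */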
static DEFINE_RAW_SPINLOCK(boot_lock);

/*
 * Write secondary_holding_pen_release in a way that is guaranteed to be
 * visible to all observers, irrespective of whether they're taking part
 * in coherency or not.  This is necessary for the hotplug code to work
 * reliably.
 */
static void __cpuinit write_pen_release(u64 val)
{
	void *start = (void *)&secondary_holding_pen_release;
	unsigned long size = sizeof(secondary_holding_pen_release);

	secondary_holding_pen_release = val;
	__flush_dcache_area(start, size);
}

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one.
	 */
	raw_spin_lock(&boot_lock);

	/*
	 * Update the pen release flag.
	 */
	write_pen_release(cpu_logical_map(cpu));

	/*
	 * Send an event, causing the secondaries to read pen_release.
	 */
	sev();

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		if (secondary_holding_pen_release == INVALID_HWID)
			break;
		udelay(10);
	}

	/*
	 * Now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish.
	 */
	raw_spin_unlock(&boot_lock);

	return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
}
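
/*
 * Completed by the secondary in secondary_start_kernel() once it has been
 * marked online; __cpu_up() waits on it with a one second timeout.
 */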
static DECLARE_COMPLETION(cpu_running);

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;

	return ret;
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	/*
	 * TTBR0 is only used for the identity mapping at this stage.  Make it
	 * point to the zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Let the primary processor know we're out of the
	 * pen, then head off into the C entry point.
	 */
	write_pen_release(INVALID_HWID);

	/*
	 * Synchronise with the boot thread.
	 */
	raw_spin_lock(&boot_lock);
	raw_spin_unlock(&boot_lock);

	/*
	 * Enable local interrupts.
	 */
	notify_cpu_starting(cpu);
	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * OK, it's off to the idle thread for us.
	 */
	cpu_idle();
}
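
/*
 * Called by the generic SMP code once all secondary CPUs have been brought
 * up.  The BogoMIPS total is loops_per_jiffy summed over the online CPUs
 * and scaled by HZ/500000, printed to two decimal places.
 */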
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = loops_per_jiffy * num_online_cpus();

	pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(), bogosum / (500000/HZ),
		(bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);
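
/*
 * NULL-terminated table of CPU enable methods that can be named by the
 * device tree "enable-method" property (currently spin-table and PSCI).
 */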
static const struct smp_enable_ops *enable_ops[] __initconst = {
	&smp_spin_table_ops,
	&smp_psci_ops,
	NULL,
};

static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
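
/*
 * Look up an enable method by the name given in the device tree,
 * returning NULL if it is not in the enable_ops table.
 */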
static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
{
	const struct smp_enable_ops **ops = enable_ops;

	while (*ops) {
		if (!strcmp(name, (*ops)->name))
			return *ops;

		ops++;
	}

	return NULL;
}

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus.  Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	const char *enable_method;
	struct device_node *dn = NULL;
	int i, cpu = 1;
	bool bootcpu_valid = false;

	while ((dn = of_find_node_by_type(dn, "cpu"))) {
		const u32 *cell;
		u64 hwid;

		/*
		 * A cpu node with missing "reg" property is
		 * considered invalid to build a cpu_logical_map
		 * entry.
		 */
		cell = of_get_property(dn, "reg", NULL);
		if (!cell) {
			pr_err("%s: missing reg property\n", dn->full_name);
			goto next;
		}
		hwid = of_read_number(cell, of_n_addr_cells(dn));

		/*
		 * Non affinity bits must be set to 0 in the DT.
		 */
		if (hwid & ~MPIDR_HWID_BITMASK) {
			pr_err("%s: invalid reg property\n", dn->full_name);
			goto next;
		}

		/*
		 * Duplicate MPIDRs are a recipe for disaster.  Scan
		 * all initialized entries and check for
		 * duplicates.  If any is found just ignore the cpu.
		 * cpu_logical_map was initialized to INVALID_HWID to
		 * avoid matching valid MPIDR values.
		 */
		for (i = 1; (i < cpu) && (i < NR_CPUS); i++) {
			if (cpu_logical_map(i) == hwid) {
				pr_err("%s: duplicate cpu reg properties in the DT\n",
					dn->full_name);
				goto next;
			}
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0.  Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%s: duplicate boot cpu reg property in DT\n",
					dn->full_name);
				goto next;
			}

			bootcpu_valid = true;

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu >= NR_CPUS)
			goto next;

		/*
		 * Check the enable-method against the set of supported
		 * enable methods (see enable_ops above).
		 */
		enable_method = of_get_property(dn, "enable-method", NULL);
		if (!enable_method) {
			pr_err("%s: missing enable-method property\n",
				dn->full_name);
			goto next;
		}

		smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
		if (!smp_enable_ops[cpu]) {
			pr_err("%s: invalid enable-method property: %s\n",
				dn->full_name, enable_method);
			goto next;
		}

		if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu) = hwid;
next:
		cpu++;
	}

	/* sanity check */
	if (cpu > NR_CPUS)
		pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n",
			   cpu, NR_CPUS);

	if (!bootcpu_valid) {
		pr_err("DT missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * All the cpus that made it to the cpu_logical_map have been
	 * validated so set them as possible cpus.
	 */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_logical_map(i) != INVALID_HWID)
			set_cpu_possible(i, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu, err;
	unsigned int ncores = num_possible_cpus();

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/* Don't bother if we're effectively UP */
	if (max_cpus <= 1)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 *
	 * Make sure we online at most (max_cpus - 1) additional CPUs.
	 */
	max_cpus--;
	for_each_possible_cpu(cpu) {
		if (max_cpus == 0)
			break;

		if (cpu == smp_processor_id())
			continue;

		if (!smp_enable_ops[cpu])
			continue;

		err = smp_enable_ops[cpu]->prepare_cpu(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		max_cpus--;
	}
}

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}
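
/*
 * The interrupt controller driver provides the actual SGI-raising routine;
 * e.g. the GIC driver registers its hook with
 * set_smp_cross_call(gic_raise_softirq) during init.
 */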

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
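
/*
 * Human-readable IPI names printed by show_ipi_list(), indexed relative
 * to IPI_RESCHEDULE.
 */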
static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x - IPI_RESCHEDULE] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};
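
/*
 * Show the per-cpu IPI delivery counts, one row per IPI type.
 */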
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
			   prec >= 4 ? " " : "");
		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, " %s\n", ipi_types[i]);
	}
}
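
/*
 * Total number of IPIs handled by @cpu, summed across all IPI types.
 */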
u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);
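
	/*
	 * Account the IPI in the per-cpu ipi_irqs statistics reported by
	 * show_ipi_list() before dispatching on the message type.
	 */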
	if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]);

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
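
/*
 * Stop all other online CPUs: IPI them with IPI_CPU_STOP and spin for up
 * to one second waiting for them to go offline.
 */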
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpu_clear(smp_processor_id(), mask);

		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}