/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <asm/bcache.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

static bool threads_disabled;

static DECLARE_BITMAP(core_power, NR_CPUS);

struct core_boot_config *mips_cps_core_bootcfg;

static int __init setup_nothreads(char *s)
{
	threads_disabled = true;
	return 0;
}
early_param("nothreads", setup_nothreads);
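
/*
 * Report the number of VPEs (VPs on MIPSr6) implemented by the given
 * core, by reading the PVPE field of the core-other GCR config
 * register. Returns 1 when neither MT nor VP support is configured and
 * usable, or when threads were disabled via the "nothreads" parameter
 * above.
 */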
static unsigned core_vpe_count(unsigned core)
{
	unsigned cfg;

	if (threads_disabled)
		return 1;

	if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
		&& (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
		return 1;

	mips_cm_lock_other(core, 0);
	cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
	mips_cm_unlock_other();
	return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}

static void __init cps_smp_setup(void)
{
	unsigned int ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int c, v;

	/* Detect & record VPE topology */
	ncores = mips_cm_numcores();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (c = nvpes = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		pr_cont("%c%u", c ? ',' : '{', core_vpes);

		/* Use the number of VPEs in core 0 for smp_num_siblings */
		if (!c)
			smp_num_siblings = core_vpes;

		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
			cpu_data[nvpes + v].core = c;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
			cpu_data[nvpes + v].vpe_id = v;
#endif
		}

		nvpes += core_vpes;
	}
	pr_cont("} total %u\n", nvpes);
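
	/*
	 * For example, on a system with two cores of two VPEs each the
	 * lines above print "VPE topology {2,2} total 4" ("VP topology"
	 * on MIPSr6).
	 */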

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}

static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable;
	u32 *entry_code;

	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	ncores = mips_cm_numcores();
	if (cca_unsuitable && ncores > 1) {
		pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
			cca);

		for_each_present_cpu(c) {
			if (cpu_data[c].core)
				set_cpu_present(c, false);
		}
	}

	/*
	 * Patch the start of mips_cps_core_entry to provide:
	 *
	 * s0 = kseg0 CCA
	 */
	entry_code = (u32 *)&mips_cps_core_entry;
	uasm_i_addiu(&entry_code, 16, 0, cca);
	blast_dcache_range((unsigned long)&mips_cps_core_entry,
			   (unsigned long)entry_code);
	bc_wback_inv((unsigned long)&mips_cps_core_entry,
		     (void *)entry_code - (void *)&mips_cps_core_entry);
	__sync();
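
	/*
	 * The uasm call above emits a single "addiu $s0, $zero, <cca>"
	 * instruction (register 16 is $s0) at the start of the entry
	 * vector. The d-cache blast, L2 writeback and sync that follow
	 * push the patched instruction out to memory, since newly reset
	 * cores fetch the vector uncached (via KSEG1) before they have
	 * joined the coherent domain.
	 */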

	/* Allocate core boot configuration structs */
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}
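
/*
 * Power up and reset a core so that it begins executing from
 * mips_cps_core_entry. The sequence below programs the core's reset
 * vector, clears its coherence enable, grants it access to the GCRs
 * and then (where a CPC is present) issues a reset command, polling
 * the sequencer state until the core reports coherent execution.
 */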
static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 access, stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core */
	mips_cm_lock_other(core, 0);

	/* Set its reset vector */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	access = read_gcr_access();
	access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
	write_gcr_access(access);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE_MSK;

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}
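
/*
 * Helper run via smp_call_function_single() on a CPU that shares the
 * target core: starting a sibling VPE requires access to the core's
 * local registers, so it must be done from within that core.
 */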
static void remote_vpe_boot(void *dummy)
{
	unsigned core = current_cpu_data.core;
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}
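
/*
 * Bring up the CPU via one of three paths: power up and reset its core
 * if that core is down, ask an online sibling in the target core to
 * start the VPE if the core is up but remote, or start the VPE
 * directly when it shares the current core.
 */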
static void cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_data[cpu].core;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned long core_entry;
	unsigned int remote;
	int err;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(core, vpe_id);
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_co_reset_base(core_entry);
		mips_cm_unlock_other();
	}

	if (core != current_cpu_data.core) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (cpu_data[remote].core != core)
				continue;
			if (cpu_online(remote))
				break;
		}

		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
}

static void cps_init_secondary(void)
{
	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned ident = gic_read_local_vp_id();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}
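
/*
 * Runs on the secondary CPU once it is up: arm the first CP0 timer
 * interrupt several jiffies in the future, join the FPU-full mask if
 * appropriate, then enable interrupts.
 */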
static void cps_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cpu)
		return -EBUSY;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();

	return 0;
}

static DECLARE_COMPLETION(cpu_death_chosen);
static unsigned cpu_death_sibling;
static enum {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
} cpu_death;
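
/*
 * A dying CPU takes one of two ways out: if another VPE in its core
 * remains online it simply halts its own TC (or VP), leaving the core
 * running; if it is the last VPE in the core it power-gates the whole
 * core. The choice is published through cpu_death before
 * cpu_death_chosen is completed, so that cps_cpu_die() knows what to
 * wait for.
 */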
void play_dead(void)
{
	unsigned int cpu, core, vpe_id;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		core = cpu_data[cpu].core;

		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (cpu_data[cpu_death_sibling].core != core)
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	/* This CPU has chosen its way out */
	complete(&cpu_death_chosen);

	if (cpu_death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		pr_debug("Gating power to core %d\n", core);
		/* Power down the core */
		cps_pm_enter_state(CPS_PM_POWER_GATED);
	}

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}
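
/*
 * Runs on a sibling CPU within the same core as the offlined CPU, which
 * can reach the dead TC's registers: spin until that TC's halt bit
 * reads back as set, confirming the halt in play_dead() took effect.
 */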
static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}

static void cps_cpu_die(unsigned int cpu)
{
	unsigned core = cpu_data[cpu].core;
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned stat;
	int err;

	/* Wait for the cpu to choose its way out */
	if (!wait_for_completion_timeout(&cpu_death_chosen,
					 msecs_to_jiffies(5000))) {
		pr_err("CPU%u: didn't offline\n", cpu);
		return;
	}

	/*
	 * Now wait for the CPU to actually offline. Without doing this that
	 * offlining may race with one or more of:
	 *
	 *   - Onlining the CPU again.
	 *   - Powering down the core if another VPE within it is offlined.
	 *   - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		do {
			mips_cm_lock_other(core, 0);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
			mips_cpc_unlock_other();
			mips_cm_unlock_other();
		} while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPUs registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(core, vpe_id);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= mips_smp_send_ipi_single,
	.send_ipi_mask		= mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
#endif
};

bool mips_cps_smp_in_use(void)
{
	extern struct plat_smp_ops *mp_ops;

	return mp_ops == &cps_smp_ops;
}
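
/*
 * Entry point for platform code: register the CPS SMP ops if the
 * required hardware is present. A CM is needed to probe & control the
 * cores and a GIC is needed for inter-processor interrupts; without
 * either we return -ENODEV so the platform can fall back to another
 * SMP implementation.
 */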
int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}