// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <asm/bcache.h>
#include <asm/mips-cps.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>
static bool threads_disabled;
static DECLARE_BITMAP(core_power, NR_CPUS);

struct core_boot_config *mips_cps_core_bootcfg;

static int __init setup_nothreads(char *s)
{
	threads_disabled = true;
	return 0;
}
early_param("nothreads", setup_nothreads);
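
/* Number of VPEs to use in the given core; forced to 1 by "nothreads". */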
static unsigned core_vpe_count(unsigned int cluster, unsigned core)
{
	if (threads_disabled)
		return 1;

	return mips_cps_numvps(cluster, core);
}
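
/*
 * Probe the cluster/core/VPE topology via the CM, record it in cpu_data,
 * mark the cluster 0 CPUs possible/present and set up core 0 (the boot
 * core) for coherent execution.
 */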
static void __init cps_smp_setup(void)
{
	unsigned int nclusters, ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int cl, c, v;

	/* Detect & record VPE topology */
	nvpes = 0;
	nclusters = mips_cps_numclusters();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (cl = 0; cl < nclusters; cl++) {
		if (cl > 0)
			pr_cont(",");
		pr_cont("{");

		ncores = mips_cps_numcores(cl);
		for (c = 0; c < ncores; c++) {
			core_vpes = core_vpe_count(cl, c);

			if (c > 0)
				pr_cont(",");
			pr_cont("%u", core_vpes);

			/* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
			if (!cl && !c)
				smp_num_siblings = core_vpes;

			for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
				cpu_set_cluster(&cpu_data[nvpes + v], cl);
				cpu_set_core(&cpu_data[nvpes + v], c);
				cpu_set_vpe_id(&cpu_data[nvpes + v], v);
			}

			nvpes += core_vpes;
		}

		pr_cont("}");
	}
	pr_cont(" total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
		set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}
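
/*
 * Check that the cache configuration permits multi-core SMP, patch the boot
 * CCA into mips_cps_core_entry and allocate the per-core and per-VPE boot
 * configuration structures.
 */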
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable, cores_limited;
	u32 *entry_code;

	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	cores_limited = false;
	if (cca_unsuitable || cpu_has_dc_aliases) {
		for_each_present_cpu(c) {
			if (cpus_are_siblings(smp_processor_id(), c))
				continue;

			set_cpu_present(c, false);
			cores_limited = true;
		}
	}
	if (cores_limited)
		pr_warn("Using only one core due to %s%s%s\n",
			cca_unsuitable ? "unsuitable CCA" : "",
			(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
			cpu_has_dc_aliases ? "dcache aliasing" : "");

	/*
	 * Patch the start of mips_cps_core_entry to provide:
	 *
	 * s0 = kseg0 CCA
	 */
	entry_code = (u32 *)&mips_cps_core_entry;
	uasm_i_addiu(&entry_code, 16, 0, cca);
	blast_dcache_range((unsigned long)&mips_cps_core_entry,
			   (unsigned long)entry_code);
	bc_wback_inv((unsigned long)&mips_cps_core_entry,
		     (void *)entry_code - (void *)&mips_cps_core_entry);
	__sync();

	/* Allocate core boot configuration structs */
	ncores = mips_cps_numcores(0);
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(0, c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}
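
/*
 * Power up and reset the named core via the CM and (where present) the CPC,
 * so that it begins executing from mips_cps_core_entry.
 */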
static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core */
	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);

	/* Set its reset vector */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	set_gcr_access(1 << core);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
			seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}
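
/*
 * smp_call_function helper: boot the target VPE from a CPU that is already
 * running within the same core.
 */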
static void remote_vpe_boot(void *dummy)
{
	unsigned core = cpu_core(&current_cpu_data);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}
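
/*
 * Bring up a secondary CPU: power up its core if it is down, otherwise start
 * the VPE either from this core or via a cross-call to an online CPU within
 * the target core.
 */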
static int cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned long core_entry;
	unsigned int remote;
	int err;

	/* We don't yet support booting CPUs in other clusters */
	if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
		return -ENOSYS;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_co_reset_base(core_entry);
		mips_cm_unlock_other();
	}

	if (!cpus_are_siblings(cpu, smp_processor_id())) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (!cpus_are_siblings(cpu, remote))
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
	return 0;
}
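
/*
 * Early init run on the secondary CPU itself: disable extra MT thread
 * contexts, sanity-check the VP ID against the GIC, then clear the interrupt
 * mask for EIC or unmask hardware interrupt lines IP2-IP7.
 */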
static void cps_init_secondary(void)
{
	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned int ident = read_gic_vl_ident();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}
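
/*
 * Final step of secondary bring-up: arm the count/compare timer and enable
 * interrupts on this CPU.
 */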
static void cps_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)

enum cpu_death {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
};
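
/*
 * Take the calling CPU out of service, either by halting just this TC/VP or
 * by gating power to the whole core.
 */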
static void cps_shutdown_this_cpu(enum cpu_death death)
{
	unsigned int cpu, core, vpe_id;

	cpu = smp_processor_id();
	core = cpu_core(&cpu_data[cpu]);

	if (death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		pr_debug("Gating power to core %d\n", core);
		/* Power down the core */
		cps_pm_enter_state(CPS_PM_POWER_GATED);
	}
}

#ifdef CONFIG_KEXEC

static void cps_kexec_nonboot_cpu(void)
{
	if (cpu_has_mipsmt || cpu_has_vp)
		cps_shutdown_this_cpu(CPU_DEATH_HALT);
	else
		cps_shutdown_this_cpu(CPU_DEATH_POWER);
}

#endif /* CONFIG_KEXEC */

#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */
#ifdef CONFIG_HOTPLUG_CPU
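
/*
 * Hotplug cpu_disable hook: CPU0 cannot be taken offline and power gating
 * support is required; otherwise drop this VPE from its core's boot mask and
 * mark it offline.
 */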
static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cpu)
		return -EBUSY;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();

	return 0;
}
static unsigned cpu_death_sibling;
static enum cpu_death cpu_death;
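
/*
 * Run on the dying CPU: choose between halting this VPE (when a sibling in
 * the same core remains online) and powering down the whole core, then hand
 * off to cps_shutdown_this_cpu().
 */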
void play_dead(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (!cpus_are_siblings(cpu, cpu_death_sibling))
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	/* This CPU has chosen its way out */
	(void)cpu_report_death();

	cps_shutdown_this_cpu(cpu_death);

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}
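
/*
 * Runs on a sibling CPU within the same core: spin until the dying CPU's TC
 * reports that it has halted.
 */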
static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}
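
/*
 * Hotplug cpu_die hook, called from a surviving CPU: wait for the dead CPU
 * to actually halt, or for its core to power down, before the hotplug
 * operation is considered complete.
 */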
static void cps_cpu_die(unsigned int cpu)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	ktime_t fail_time;
	unsigned stat;
	int err;

	/* Wait for the cpu to choose its way out */
	if (!cpu_wait_death(cpu, 5)) {
		pr_err("CPU%u: didn't offline\n", cpu);
		return;
	}

	/*
	 * Now wait for the CPU to actually offline. Without doing this that
	 * offlining may race with one or more of:
	 *
	 *   - Onlining the CPU again.
	 *   - Powering down the core if another VPE within it is offlined.
	 *   - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		fail_time = ktime_add_ms(ktime_get(), 2000);
		do {
			mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE;
			stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
			mips_cpc_unlock_other();
			mips_cm_unlock_other();

			if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
				break;

			/*
			 * The core ought to have powered down, but didn't &
			 * now we don't really know what state it's in. It's
			 * likely that its _pwr_up pin has been wired to logic
			 * 1 & it powered back up as soon as we powered it
			 * down...
			 *
			 * The best we can do is warn the user & continue in
			 * the hope that the core is doing nothing harmful &
			 * might behave properly if we online it later.
			 */
			if (WARN(ktime_after(ktime_get(), fail_time),
				 "CPU%u hasn't powered down, seq. state %u\n",
				 cpu, stat))
				break;
		} while (1);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPUs registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}

#endif /* CONFIG_HOTPLUG_CPU */
static const struct plat_smp_ops cps_smp_ops = {
	.smp_setup = cps_smp_setup,
	.prepare_cpus = cps_prepare_cpus,
	.boot_secondary = cps_boot_secondary,
	.init_secondary = cps_init_secondary,
	.smp_finish = cps_smp_finish,
	.send_ipi_single = mips_smp_send_ipi_single,
	.send_ipi_mask = mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable = cps_cpu_disable,
	.cpu_die = cps_cpu_die,
#endif
#ifdef CONFIG_KEXEC
	.kexec_nonboot_cpu = cps_kexec_nonboot_cpu,
#endif
};
bool mips_cps_smp_in_use(void)
{
	extern const struct plat_smp_ops *mp_ops;

	return mp_ops == &cps_smp_ops;
}
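
/*
 * Register the CPS SMP ops, provided the system has a CM and a GIC (the
 * latter being required for IPIs).
 */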
int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);

	return 0;
}