// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/plat-versatile/platsmp.c
 *
 *  Copyright (C) 2002 ARM Ltd.
 *  All Rights Reserved
 *
 * This code is specific to the hardware found on ARM Realview and
 * Versatile Express platforms where the CPUs are unable to be individually
 * woken, and where there is no way to hot-unplug CPUs. Real platforms
 * should not copy this code.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>

#include <plat/platsmp.h>

/*
 * versatile_cpu_release controls the release of CPUs from the holding
 * pen in headsmp.S, which exists because we are not always able to
 * control the release of individual CPUs from the board firmware.
 * Production platforms do not need this.
 */
volatile int versatile_cpu_release = -1;

/*
 * Write versatile_cpu_release in a way that is guaranteed to be visible to
 * all observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 */
static void versatile_write_cpu_release(int val)
{
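	/*
	 * The plain store plus smp_wmb() covers observers taking part in
	 * coherency; sync_cache_w() additionally cleans the line to the
	 * point of coherency, so a CPU still sitting in the holding pen
	 * with its cache disabled can see the new value too.
	 */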
	versatile_cpu_release = val;
	smp_wmb();
	sync_cache_w(&versatile_cpu_release);
}

/*
 * versatile_lock exists to avoid running the loops_per_jiffy delay loop
 * calibrations on the secondary CPU while the requesting CPU is using
 * the limited-bandwidth bus - which affects the calibration value.
 * Production platforms do not need this.
 */
static DEFINE_RAW_SPINLOCK(versatile_lock);

void versatile_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	versatile_write_cpu_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
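	/*
	 * The lock/unlock pair makes this CPU block here until
	 * versatile_boot_secondary() drops versatile_lock, keeping the
	 * delay-loop calibration off the bus until the boot CPU is done.
	 */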
	raw_spin_lock(&versatile_lock);
	raw_spin_unlock(&versatile_lock);
}

int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	raw_spin_lock(&versatile_lock);

	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them.  However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
	versatile_write_cpu_release(cpu_logical_map(cpu));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

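	/*
	 * Poll for up to one second for the secondary to leave the pen;
	 * it signals this by writing -1 back to versatile_cpu_release
	 * from versatile_secondary_init().
	 */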
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (versatile_cpu_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	raw_spin_unlock(&versatile_lock);

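	/*
	 * If the value never went back to -1 the secondary never left the
	 * pen, so report the failure to the caller.
	 */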
	return versatile_cpu_release != -1 ? -ENOSYS : 0;
}