/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * RajeshwarR: Dec 11, 2007
 *   -- Added support for Inter Processor Interrupts
 *
 * Vineetg: Nov 1st, 2007
 *   -- Initial Write (Borrowed heavily from ARM)
 */
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/of_fdt.h>

#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>

#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;

EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
EXPORT_SYMBOL_GPL(smp_bitops_lock);
#endif

struct plat_smp_ops __weak plat_smp_ops;
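
/*
 * Illustrative sketch only (hypothetical platform code, not part of this
 * file): a platform with its own cross-core/IPI hardware would override the
 * weak instance above by filling in the hooks this file invokes, e.g.
 *
 *	static void my_plat_ipi_send(int cpu) { ... poke IPI h/w ... }
 *	static void my_plat_cpu_kick(int cpu, unsigned long pc) { ... }
 *
 *	plat_smp_ops.info     = "my cross-core unit";
 *	plat_smp_ops.cpu_kick = my_plat_cpu_kick;
 *	plat_smp_ops.ipi_send = my_plat_ipi_send;
 *
 * Any hook left NULL simply falls back to the generic handling below.
 */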

/* XXX: per cpu ? Only needed once in early secondary boot */
struct task_struct *secondary_idle_tsk;

/* Called from start_kernel */
void __init smp_prepare_boot_cpu(void)
{
}

static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
{
	unsigned long dt_root = of_get_flat_dt_root();
	const char *buf;

	buf = of_get_flat_dt_prop(dt_root, name, NULL);
	if (!buf)
		return -EINVAL;

	if (cpulist_parse(buf, cpumask))
		return -EINVAL;

	return 0;
}

/*
 * Read from DeviceTree and setup cpu possible mask. If there is no
 * "possible-cpus" property in DeviceTree pretend all [0..NR_CPUS-1] exist.
 */
static void __init arc_init_cpu_possible(void)
{
	struct cpumask cpumask;

	if (arc_get_cpu_map("possible-cpus", &cpumask)) {
		pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
			NR_CPUS);

		cpumask_setall(&cpumask);
	}

	if (!cpumask_test_cpu(0, &cpumask))
		panic("Master cpu (cpu[0]) is missed in cpu possible mask!");

	init_cpu_possible(&cpumask);
}
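
/*
 * For illustration (assumed DeviceTree fragment, not from a real board): the
 * "possible-cpus" property sits in the root node and holds a cpu-list string
 * that cpulist_parse() understands, e.g.
 *
 *	/ {
 *		possible-cpus = "0-1";
 *		...
 *	};
 *
 * With the above only cpu0 and cpu1 are marked possible; without the property
 * all NR_CPUS are assumed, as the fallback above shows.
 */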

/*
 * Called from setup_arch() before calling setup_processor()
 *
 * - Initialise the CPU possible map early - this describes the CPUs
 *   which may be present or become present in the system.
 * - Call early smp init hook. This can initialize a specific multi-core
 *   IP which is say common to several platforms (hence not part of
 *   platform specific init_early() hook)
 */
void __init smp_init_cpus(void)
{
	arc_init_cpu_possible();

	if (plat_smp_ops.init_early_smp)
		plat_smp_ops.init_early_smp();
}

/* called from init() => process 1 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * if platform didn't set the present map already, do it now
	 * boot cpu is set to present already by init/main.c
	 */
	if (num_present_cpus() <= 1)
		init_cpu_present(cpu_possible_mask);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * Default smp boot helper for Run-on-reset case where all cores start off
 * together. Non-masters need to wait for Master to start running.
 * This is implemented using a flag in memory, which Non-masters spin-wait on.
 * Master sets it to cpu-id of core to "ungate" it.
 */
static volatile int wake_flag;

#ifdef CONFIG_ISA_ARCOMPACT
#define __boot_read(f)		f
#define __boot_write(f, v)	f = v
#else
#define __boot_read(f)		arc_read_uncached_32(&f)
#define __boot_write(f, v)	arc_write_uncached_32(&f, v)
#endif
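
/*
 * The resulting handshake, spelled out (derived from the two helpers below):
 *
 *	Master (__cpu_up)                Non-master (spinning since reset)
 *	-----------------                ---------------------------------
 *	__boot_write(wake_flag, cpu);    while (__boot_read(wake_flag) != cpu)
 *	                                         ;            // keep spinning
 *	                                 __boot_write(wake_flag, 0);
 *	                                 ... on to start_kernel_secondary()
 *
 * The uncached accessors are used on ARCv2 presumably because the waiting
 * core's caches are not set up this early, so plain loads/stores could miss
 * the update.
 */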

static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
	BUG_ON(cpu == 0);

	__boot_write(wake_flag, cpu);
}

void arc_platform_smp_wait_to_boot(int cpu)
{
	/* for halt-on-reset, we've waited already */
	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
		return;

	while (__boot_read(wake_flag) != cpu)
		;

	__boot_write(wake_flag, 0);
}

const char *arc_platform_smp_cpuinfo(void)
{
	return plat_smp_ops.info ? : "";
}

/*
 * The very first "C" code executed by secondary
 * Called from asm stub in head.S
 * "current"/R25 already setup by low level boot code
 */
void start_kernel_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/* MMU, Caches, Vector Table, Interrupts etc */
	setup_processor();

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	/* Some SMP H/w setup - for each cpu */
	if (plat_smp_ops.init_per_cpu)
		plat_smp_ops.init_per_cpu(cpu);

	if (machine_desc->init_per_cpu)
		machine_desc->init_per_cpu(cpu);

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Called from kernel_init() -> smp_init() - for each CPU
 *
 * At this point, Secondary Processor is "HALT"ed:
 *  - It booted, but was halted in head.S
 *  - It was configured to halt-on-reset
 * So need to wake it up.
 *
 * Essential requirements being where to run from (PC) and stack (SP)
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	unsigned long wait_till;

	secondary_idle_tsk = idle;

	pr_info("Idle Task [%d] %p", cpu, idle);
	pr_info("Trying to bring up CPU%u ...\n", cpu);

	if (plat_smp_ops.cpu_kick)
		plat_smp_ops.cpu_kick(cpu,
				(unsigned long)first_lines_of_secondary);
	else
		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);

	/* wait for 1 sec after kicking the secondary */
	wait_till = jiffies + HZ;
	while (time_before(jiffies, wait_till)) {
		if (cpu_online(cpu))
			break;
	}

	if (!cpu_online(cpu)) {
		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
		return -1;
	}

	secondary_idle_tsk = NULL;

	return 0;
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/*****************************************************************************/
/*              Inter Processor Interrupt Handling                           */
/*****************************************************************************/

enum ipi_msg_type {
	IPI_EMPTY = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/*
 * In arches with IRQ for each msg type (above), receiver can use IRQ-id to
 * figure out what msg was sent. For those which don't (ARC has dedicated IPI
 * IRQ), the msg-type needs to be conveyed via per-cpu data
 */

static DEFINE_PER_CPU(unsigned long, ipi_data);
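
/*
 * Worked example of the encoding (values follow from the enum above): if a
 * reschedule and a cross-call are both pending for a cpu before it services
 * the IPI, its ipi_data word holds
 *
 *	(1UL << IPI_RESCHEDULE) | (1UL << IPI_CALL_FUNC) == 0x2 | 0x4 == 0x6
 *
 * ipi_send_msg_one() ORs bits in; do_IPI() xchg()es the word back to 0 and
 * handles each set bit in turn.
 */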

static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
{
	unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
	unsigned long old, new;
	unsigned long flags;

	pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);

	local_irq_save(flags);

	/*
	 * Atomically write new msg bit (in case others are writing too),
	 * and read back old value
	 */
	do {
		new = old = READ_ONCE(*ipi_data_ptr);
		new |= 1U << msg;
	} while (cmpxchg(ipi_data_ptr, old, new) != old);

	/*
	 * Call the platform specific IPI kick function, but avoid if possible:
	 * Only do so if there's no pending msg from other concurrent sender(s).
	 * Otherwise, receiver will see this msg as well when it takes the
	 * IPI corresponding to that msg. This is true, even if it is already in
	 * IPI handler, because !@old means it has not yet dequeued the msg(s)
	 * so @new msg can be a free-loader
	 */
	if (plat_smp_ops.ipi_send && !old)
		plat_smp_ops.ipi_send(cpu);

	local_irq_restore(flags);
}

static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
	unsigned int cpu;

	for_each_cpu(cpu, callmap)
		ipi_send_msg_one(cpu, msg);
}

void smp_send_reschedule(int cpu)
{
	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;
	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	ipi_send_msg(&targets, IPI_CPU_STOP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	ipi_send_msg(mask, IPI_CALL_FUNC);
}

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(void)
{
	machine_halt();
}

static inline int __do_IPI(unsigned long msg)
{
	int rc = 0;

	switch (msg) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop();
		break;

	default:
		rc = 1;
	}

	return rc;
}

/*
 * arch-common ISR to handle for inter-processor interrupts
 * Has hooks for platform specific IPI
 */
irqreturn_t do_IPI(int irq, void *dev_id)
{
	unsigned long pending;
	unsigned long __maybe_unused copy;

	pr_debug("IPI [%ld] received on cpu %d\n",
		 *this_cpu_ptr(&ipi_data), smp_processor_id());

	if (plat_smp_ops.ipi_clear)
		plat_smp_ops.ipi_clear(irq);

	/*
	 * "dequeue" the msg corresponding to this IPI (and possibly other
	 * piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
	 */
	copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);

	do {
		unsigned long msg = __ffs(pending);
		int rc;

		rc = __do_IPI(msg);
		if (rc)
			pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
		pending &= ~(1U << msg);
	} while (pending);

	return IRQ_HANDLED;
}
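
/*
 * Example trace of the dequeue loop above, assuming pending == 0x6 on entry
 * (both IPI_RESCHEDULE and IPI_CALL_FUNC queued, per the encoding described
 * near ipi_data):
 *
 *	__ffs(0x6) == 1  ->  __do_IPI(IPI_RESCHEDULE),  pending becomes 0x4
 *	__ffs(0x4) == 2  ->  __do_IPI(IPI_CALL_FUNC),   pending becomes 0x0
 *
 * and the loop exits. A set bit with no matching case falls into "default:"
 * and is reported as a bogus msg.
 */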

/*
 * API called by platform code to hookup arch-common ISR to their IPI IRQ
 *
 * Note: If IPI is provided by platform (vs. say ARC MCIP), their intc setup/map
 * function needs to call irq_set_percpu_devid() for IPI IRQ, otherwise
 * request_percpu_irq() below will fail
 */
static DEFINE_PER_CPU(int, ipi_dev);

int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
	int *dev = per_cpu_ptr(&ipi_dev, cpu);
	unsigned int virq = irq_find_mapping(NULL, hwirq);

	if (!virq)
		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);

	/* Boot cpu calls request, all call enable */
	if (!cpu) {
		int rc;

		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
		if (rc)
			panic("Percpu IRQ request failed for %u\n", virq);
	}

	enable_percpu_irq(virq, 0);

	return 0;
}
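
/*
 * Hypothetical usage sketch (the IPI_HWIRQ number and hook placement are
 * assumptions, not mandated by this file):
 *
 *	- the platform interrupt controller's map() callback marks the IPI
 *	  line as per-cpu:
 *		irq_set_percpu_devid(virq);
 *
 *	- the platform's per-cpu bringup hook (e.g. plat_smp_ops.init_per_cpu)
 *	  then wires up this handler on every core:
 *		smp_ipi_irq_setup(cpu, IPI_HWIRQ);
 *
 * cpu 0 performs the request_percpu_irq(); every cpu enables its own copy.
 */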