/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (cpu_data[cpu].core == cpu_data[i].core) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
			}
		}
	} else
		cpu_set(cpu, cpu_sibling_map[cpu]);
}
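
/*
 * Illustrative example (numbers invented, not from this file): with
 * smp_num_siblings == 2, CPUs 0/1 on core 0 and CPUs 2/3 on core 1,
 * bringing up all four CPUs in order leaves
 * cpu_sibling_map[0] == cpu_sibling_map[1] == { 0, 1 } and
 * cpu_sibling_map[2] == cpu_sibling_map[3] == { 2, 3 }.
 */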

struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}
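
/*
 * Usage sketch (hypothetical platform code; the names my_smp_ops and
 * my_boot_secondary are invented for illustration): a platform
 * registers its operations once during early setup, e.g.
 *
 *	static struct plat_smp_ops my_smp_ops = {
 *		.boot_secondary = my_boot_secondary,
 *		...
 *	};
 *	register_smp_ops(&my_smp_ops);
 *
 * A second registration is not rejected, but triggers the warning
 * printed above.
 */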

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage __cpuinit void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) != 0)
		__cpu_name[smp_processor_id()] = __cpu_name[0];
	else
#endif /* CONFIG_MIPS_MT_SMTC */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */
	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);

	cpu_set(cpu, cpu_callin_map);

	synchronise_count_slave(cpu);

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_ONLINE);
}
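
/*
 * Note on the bring-up handshake: setting this CPU's bit in
 * cpu_callin_map above is what releases the master, which busy-waits
 * on that bit in __cpu_up() below before synchronising cycle counters
 * with synchronise_count_master().
 */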

/*
 * Call into both interrupt handlers, as we share the IPI for them
 */
void __irq_entry smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	generic_smp_call_function_interrupt();
	irq_exit();
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	for (;;) {
		if (cpu_wait)
			(*cpu_wait)();	/* Wait if available. */
	}
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
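
/*
 * The final 0 above is smp_call_function()'s "wait" argument: the
 * caller does not block until the IPI handlers complete, which they
 * never would here since stop_this_cpu() spins forever on the targets.
 */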

void __init smp_cpus_done(unsigned int max_cpus)
{
	mp_ops->cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	cpu_set(0, cpu_callin_map);
}

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	mp_ops->boot_secondary(cpu, tidle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	synchronise_count_master(cpu);
	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}
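
/*
 * Why SMTC may skip the cross-call entirely: as the comment above
 * notes, CONFIG_MIPS_MT_SMTC currently implies a single physical core,
 * so (presumably) a local flush of that core's shared TLB already
 * covers every logical CPU.
 */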

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};
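
/*
 * The IPI helpers below receive only a single void *info argument, so
 * the multi-argument flush operations bundle their parameters into a
 * struct flush_tlb_data on the caller's stack and pass its address.
 */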

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}
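
/*
 * Note the contrast with the user-space flushes: kernel mappings are
 * relevant on every CPU, so this path always IPIs all online CPUs via
 * on_each_cpu() and never takes the cheaper "zero the context to force
 * a new allocation at switch_mm time" shortcut used above.
 */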

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
		}
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
	int i;
	int cpu = smp_processor_id();

	dump_ipi_function_ptr = dump_ipi_callback;
	smp_mb();
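	/*
	 * The barrier above orders the store to dump_ipi_function_ptr
	 * before the IPIs sent below, so the targets (presumably a
	 * platform SMP_DUMP handler, not defined in this file) observe
	 * the callback pointer by the time they are interrupted.
	 */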
	for_each_online_cpu(i)
		if (i != cpu)
			mp_ops->send_ipi_single(i, SMP_DUMP);
}
EXPORT_SYMBOL(dump_send_ipi);
#endif