/*
 * SMP Support
 *
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Lots of stuff stolen from arch/alpha/kernel/smp.c
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>   IA64-SMP functions. Reorganized
 *          the existing code (on the lines of x86 port).
 * 00/09/11 David Mosberger <davidm@hpl.hp.com> Do loops_per_jiffy
 *          calibration on each CPU.
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> fixed logical processor id
 * 00/03/31 Rohit Seth <rohit.seth@intel.com>   Fixes for Bootstrap Processor
 *          & cpu_online_map now gets done here (instead of setup.c)
 * 99/10/05 davidm Update to bring it in sync with new command-line processing
 *          scheme.
 * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
 *          smp_call_function_single to resend IPI on timeouts
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
#include <asm/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/mca.h>
/*
 * Note: alignment of 4 entries/cacheline was empirically determined
 * to be a good tradeoff between hot cachelines & spreading the array
 * across too many cachelines.
 */
static struct local_tlb_flush_counts {
        unsigned int count;
} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
#define IPI_CALL_FUNC           0
#define IPI_CPU_STOP            1
#define IPI_CALL_FUNC_SINGLE    2
#define IPI_KDUMP_CPU_STOP      3
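/*
 * Each IPI_* value above is used as a bit number in the per-CPU
 * ipi_operation word below: a sender sets the bit for the requested
 * operation and then raises IA64_IPI_VECTOR on the target CPU, and
 * handle_IPI() on that CPU decodes and clears the bits.
 */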
/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
extern void cpu_halt (void);

static void
stop_this_cpu(void)
{
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        max_xtp();
        local_irq_disable();
        cpu_halt();
}
void
cpu_die(void)
{
        max_xtp();
        local_irq_disable();
        cpu_halt();
        /* Should never be here */
        BUG();
        for (;;);
}
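/*
 * IPI interrupt handler: xchg() atomically grabs and clears this CPU's
 * pending ipi_operation bits, then each set bit is dispatched in turn.
 * Bits set by other CPUs while the loop runs are picked up by the
 * outer while().
 */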
irqreturn_t
handle_IPI (int irq, void *dev_id)
{
        int this_cpu = get_cpu();
        unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
        unsigned long ops;

        mb();   /* Order interrupt and bit testing. */
        while ((ops = xchg(pending_ipis, 0)) != 0) {
                mb();   /* Order bit clearing and data access. */
                do {
                        unsigned long which;

                        which = ffz(~ops);
                        ops &= ~(1 << which);

                        switch (which) {
                        case IPI_CPU_STOP:
                                stop_this_cpu();
                                break;
                        case IPI_CALL_FUNC:
                                generic_smp_call_function_interrupt();
                                break;
                        case IPI_CALL_FUNC_SINGLE:
                                generic_smp_call_function_single_interrupt();
                                break;
#ifdef CONFIG_KEXEC
                        case IPI_KDUMP_CPU_STOP:
                                unw_init_running(kdump_cpu_freeze, NULL);
                                break;
#endif
                        default:
                                printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
                                       this_cpu, which);
                                break;
                        }
                } while (ops);
                mb();   /* Order data access and bit testing. */
        }
        put_cpu();
        return IRQ_HANDLED;
}
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_single (int dest_cpu, int op)
{
        set_bit(op, &per_cpu(ipi_operation, dest_cpu));
        platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_allbutself (int op)
{
        unsigned int i;

        for_each_online_cpu(i) {
                if (i != smp_processor_id())
                        send_IPI_single(i, op);
        }
}
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_mask(cpumask_t mask, int op)
{
        unsigned int cpu;

        for_each_cpu_mask(cpu, mask) {
                send_IPI_single(cpu, op);
        }
}
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_all (int op)
{
        int i;

        for_each_online_cpu(i) {
                send_IPI_single(i, op);
        }
}
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_self (int op)
{
        send_IPI_single(smp_processor_id(), op);
}
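/*
 * Crash-dump (kexec) support: kdump_smp_send_stop() asks every other
 * CPU to freeze via IPI_KDUMP_CPU_STOP, while kdump_smp_send_init()
 * sends an INIT to any CPU whose kdump_status slot is still zero,
 * i.e. one that apparently has not reacted to the stop IPI yet.
 */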
#ifdef CONFIG_KEXEC
void
kdump_smp_send_stop(void)
{
        send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
}

void
kdump_smp_send_init(void)
{
        unsigned int cpu, self_cpu;

        self_cpu = smp_processor_id();
        for_each_online_cpu(cpu) {
                if (cpu != self_cpu) {
                        if (kdump_status[cpu] == 0)
                                platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
                }
        }
}
#endif
/*
 * Called with preemption disabled.
 */
void
smp_send_reschedule (int cpu)
{
        platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
/*
 * Called with preemption disabled.
 */
static void
smp_send_local_flush_tlb (int cpu)
{
        platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
}
void
smp_local_flush_tlb(void)
{
        /*
         * Use atomic ops. Otherwise, the load/increment/store sequence from
         * a "++" operation can have the line stolen between the load & store.
         * The overhead of the atomic op is negligible in this case & offers
         * significant benefit for the brief periods where lots of cpus
         * are simultaneously flushing TLBs.
         */
        ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
        local_flush_tlb_all();
}

#define FLUSH_DELAY     5 /* Usec backoff to eliminate excessive cacheline bouncing */
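/*
 * Flush the local TLB on every CPU in xcpumask.  A 16-bit snapshot of
 * each target's local_tlb_flush_counts entry is taken first; after the
 * flush IPIs are sent, we spin (with a udelay(FLUSH_DELAY) backoff)
 * until each count changes, which signals that the target has run
 * smp_local_flush_tlb().
 */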
void
smp_flush_tlb_cpumask(cpumask_t xcpumask)
{
        unsigned short *counts = __ia64_per_cpu_var(shadow_flush_counts);
        cpumask_t cpumask = xcpumask;
        int mycpu, cpu, flush_mycpu = 0;

        preempt_disable();
        mycpu = smp_processor_id();

        for_each_cpu_mask(cpu, cpumask)
                counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff;

        mb();
        for_each_cpu_mask(cpu, cpumask) {
                if (cpu == mycpu)
                        flush_mycpu = 1;
                else
                        smp_send_local_flush_tlb(cpu);
        }

        if (flush_mycpu)
                smp_local_flush_tlb();

        for_each_cpu_mask(cpu, cpumask)
                while (counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff))
                        udelay(FLUSH_DELAY);

        preempt_enable();
}
void
smp_flush_tlb_all (void)
{
        on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
}
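/*
 * Flush the TLB for a given address space.  The common single-threaded
 * case (mm is the current address space with a single user) is handled
 * locally without IPIs; otherwise every CPU runs
 * local_finish_flush_tlb_mm() via on_each_cpu().
 */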
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
        preempt_disable();
        /* this happens for the common case of a single-threaded fork():  */
        if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
        {
                local_finish_flush_tlb_mm(mm);
                preempt_enable();
                return;
        }

        preempt_enable();
        /*
         * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
         * have been running in the address space.  It's not clear that this is worth the
         * trouble though: to avoid races, we have to raise the IPI on the target CPU
         * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
         * rather trivial.
         */
        on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
}
void arch_send_call_function_single_ipi(int cpu)
{
        send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi(cpumask_t mask)
{
        send_IPI_mask(mask, IPI_CALL_FUNC);
}
/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void
smp_send_stop (void)
{
        send_IPI_allbutself(IPI_CPU_STOP);
}
int
setup_profiling_timer (unsigned int multiplier)
{
        return -EINVAL;
}