/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 * (c) 2002, 2003 Andi Kleen, SuSE Labs.
 *
 * i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <mach_ipi.h>
#include <mach_apic.h>
/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *	None of the E1AP-E3AP errata are visible to the user.
 *
 *	E1AP.	see PII A1AP
 *	E2AP.	see PII A2AP
 *	E3AP.	see PII A3AP
 *
 * Pentium II / [Xeon]
 *	None of the A1AP-A3AP errata are visible to the user.
 *
 *	A1AP.	see PPro 1AP
 *	A2AP.	see PPro 2AP
 *	A3AP.	see PPro 7AP
 *
 * Pentium Pro
 *	None of 1AP-9AP errata are visible to the normal user,
 *	except occasional delivery of 'spurious interrupt' as trap #15.
 *	This is very rare and a non-problem.
 *
 *	1AP.	Linux maps APIC as non-cacheable
 *	2AP.	worked around in hardware
 *	3AP.	fixed in C0 and above steppings microcode update.
 *		Linux does not use excessive STARTUP_IPIs.
 *	4AP.	worked around in hardware
 *	5AP.	symmetric IO mode (normal Linux operation) not affected.
 *		'noapic' mode has vector 0xf filled out properly.
 *	6AP.	'noapic' mode might be affected - fixed in later steppings
 *	7AP.	We do not assume writes to the LVT deasserting IRQs
 *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
 *	9AP.	We do not use mixed mode
 *
 * Pentium
 *	There is a marginal case where REP MOVS on 100MHz SMP
 *	machines with B stepping processors can fail. XXX should provide
 *	an L1cache=Writethrough or L1cache=off option.
 *
 *	B stepping CPUs may hang. There are hardware work arounds
 *	for this. We warn about it in case your board doesn't have the work
 *	arounds. Basically that's so I can tell anyone with a B stepping
 *	CPU and SMP problems "tough".
 *
 *	Specific items [From Pentium Processor Specification Update]
 *
 *	1AP.	Linux doesn't use remote read
 *	2AP.	Linux doesn't trust APIC errors
 *	3AP.	We work around this
 *	4AP.	Linux never generated 3 interrupts of the same priority
 *		to cause a lost local interrupt.
 *	5AP.	Remote read is never used
 *	6AP.	not affected - worked around in hardware
 *	7AP.	not affected - worked around in hardware
 *	8AP.	worked around in hardware - we get explicit CS errors if not
 *	9AP.	only 'noapic' mode affected. Might generate spurious
 *		interrupts, we log only the first one and count the
 *		rest silently.
 *	10AP.	not affected - worked around in hardware
 *	11AP.	Linux reads the APIC between writes to avoid this, as per
 *		the documentation. Make sure you preserve this as it affects
 *		the C stepping chips too.
 *	12AP.	not affected - worked around in hardware
 *	13AP.	not affected - worked around in hardware
 *	14AP.	we always deassert INIT during bootup
 *	15AP.	not affected - worked around in hardware
 *	16AP.	not affected - worked around in hardware
 *	17AP.	not affected - worked around in hardware
 *	18AP.	not affected - worked around in hardware
 *	19AP.	not affected - worked around in BIOS
 *
 * If this sounds worrying believe me these bugs are either ___RARE___,
 * or are signal timing bugs worked around in hardware and there's
 * about nothing of note with C stepping upwards.
 */
/*
* this function sends a ' reschedule ' IPI to another CPU .
* it goes straight through and wastes no time serializing
* anything . Worst case is that we lose a reschedule . . .
*/
static void native_smp_send_reschedule ( int cpu )
{
2008-03-10 17:44:03 +05:30
if ( unlikely ( cpu_is_offline ( cpu ) ) ) {
WARN_ON ( 1 ) ;
return ;
}
2008-12-16 17:33:59 -08:00
send_IPI_mask ( cpumask_of ( cpu ) , RESCHEDULE_VECTOR ) ;
2008-03-03 14:12:52 -03:00
}
2008-06-26 11:21:54 +02:00
void native_send_call_func_single_ipi ( int cpu )
2008-03-03 14:12:52 -03:00
{
2008-12-16 17:33:59 -08:00
send_IPI_mask ( cpumask_of ( cpu ) , CALL_FUNCTION_SINGLE_VECTOR ) ;
2008-03-03 14:12:52 -03:00
}
2008-12-16 17:33:59 -08:00
void native_send_call_func_ipi ( const struct cpumask * mask )
2008-03-03 14:12:52 -03:00
{
2009-01-04 05:18:03 -08:00
cpumask_var_t allbutself ;
2008-03-03 14:12:52 -03:00
2009-01-04 05:18:03 -08:00
if ( ! alloc_cpumask_var ( & allbutself , GFP_ATOMIC ) ) {
send_IPI_mask ( mask , CALL_FUNCTION_VECTOR ) ;
return ;
}
2008-03-03 14:12:52 -03:00
2009-01-04 05:18:03 -08:00
cpumask_copy ( allbutself , cpu_online_mask ) ;
cpumask_clear_cpu ( smp_processor_id ( ) , allbutself ) ;
if ( cpumask_equal ( mask , allbutself ) & &
cpumask_equal ( cpu_online_mask , cpu_callout_mask ) )
2008-03-03 14:12:52 -03:00
send_IPI_allbutself ( CALL_FUNCTION_VECTOR ) ;
else
send_IPI_mask ( mask , CALL_FUNCTION_VECTOR ) ;
2009-01-04 05:18:03 -08:00
free_cpumask_var ( allbutself ) ;
2008-03-03 14:12:52 -03:00
}
/*
* this function calls the ' stop ' function on all other CPUs in the system .
*/
static void native_smp_send_stop ( void )
{
unsigned long flags ;
if ( reboot_force )
return ;
2008-06-06 11:18:06 +02:00
smp_call_function ( stop_this_cpu , NULL , 0 ) ;
2008-03-03 14:12:52 -03:00
local_irq_save ( flags ) ;
disable_local_APIC ( ) ;
local_irq_restore ( flags ) ;
}
/*
* Reschedule call back . Nothing to do ,
* all the work is done automatically when
* we return from the interrupt .
*/
void smp_reschedule_interrupt ( struct pt_regs * regs )
{
ack_APIC_irq ( ) ;
2008-12-08 19:19:26 -08:00
inc_irq_stat ( irq_resched_count ) ;
2008-03-03 14:12:52 -03:00
}
void smp_call_function_interrupt ( struct pt_regs * regs )
{
ack_APIC_irq ( ) ;
irq_enter ( ) ;
2008-06-26 11:21:54 +02:00
generic_smp_call_function_interrupt ( ) ;
2008-12-08 19:19:26 -08:00
inc_irq_stat ( irq_call_count ) ;
2008-03-03 14:12:52 -03:00
irq_exit ( ) ;
2008-06-26 11:21:54 +02:00
}
2008-03-03 14:12:52 -03:00
2008-07-01 13:12:04 +02:00
void smp_call_function_single_interrupt ( struct pt_regs * regs )
2008-06-26 11:21:54 +02:00
{
ack_APIC_irq ( ) ;
irq_enter ( ) ;
generic_smp_call_function_single_interrupt ( ) ;
2008-12-08 19:19:26 -08:00
inc_irq_stat ( irq_call_count ) ;
2008-06-26 11:21:54 +02:00
irq_exit ( ) ;
2008-03-03 14:12:52 -03:00
}
struct smp_ops smp_ops = {
. smp_prepare_boot_cpu = native_smp_prepare_boot_cpu ,
. smp_prepare_cpus = native_smp_prepare_cpus ,
. smp_cpus_done = native_smp_cpus_done ,
. smp_send_stop = native_smp_send_stop ,
. smp_send_reschedule = native_smp_send_reschedule ,
2008-06-26 11:21:54 +02:00
2008-08-22 11:52:11 +01:00
. cpu_up = native_cpu_up ,
. cpu_die = native_cpu_die ,
. cpu_disable = native_cpu_disable ,
. play_dead = native_play_dead ,
2008-06-26 11:21:54 +02:00
. send_call_func_ipi = native_send_call_func_ipi ,
. send_call_func_single_ipi = native_send_call_func_single_ipi ,
2008-03-03 14:12:52 -03:00
} ;
EXPORT_SYMBOL_GPL ( smp_ops ) ;