/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <mach_apic.h>
/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *   None of the E1AP-E3AP errata are visible to the user.
 *
 *   E1AP.  see PII A1AP
 *   E2AP.  see PII A2AP
 *   E3AP.  see PII A3AP
 *
 * Pentium II / [Xeon]
 *   None of the A1AP-A3AP errata are visible to the user.
 *
 *   A1AP.  see PPro 1AP
 *   A2AP.  see PPro 2AP
 *   A3AP.  see PPro 7AP
 *
 * Pentium Pro
 *   None of the 1AP-9AP errata are visible to the normal user,
 *   except occasional delivery of 'spurious interrupt' as trap #15.
 *   This is very rare and a non-problem.
 *
 *   1AP.  Linux maps APIC as non-cacheable
 *   2AP.  worked around in hardware
 *   3AP.  fixed in C0 and above steppings microcode update.
 *         Linux does not use excessive STARTUP_IPIs.
 *   4AP.  worked around in hardware
 *   5AP.  symmetric IO mode (normal Linux operation) not affected.
 *         'noapic' mode has vector 0xf filled out properly.
 *   6AP.  'noapic' mode might be affected - fixed in later steppings
 *   7AP.  We do not assume writes to the LVT deasserting IRQs
 *   8AP.  We do not enable low power mode (deep sleep) during MP bootup
 *   9AP.  We do not use mixed mode
 *
 * Pentium
 *   There is a marginal case where REP MOVS on 100MHz SMP
 *   machines with B stepping processors can fail. XXX should provide
 *   an L1cache=Writethrough or L1cache=off option.
 *
 *   B stepping CPUs may hang. There are hardware workarounds
 *   for this. We warn about it in case your board doesn't have the
 *   workarounds. Basically that's so I can tell anyone with a B stepping
 *   CPU and SMP problems "tough".
 *
 * Specific items [From Pentium Processor Specification Update]
 *
 *   1AP.  Linux doesn't use remote read
 *   2AP.  Linux doesn't trust APIC errors
 *   3AP.  We work around this
 *   4AP.  Linux never generated 3 interrupts of the same priority
 *         to cause a lost local interrupt.
 *   5AP.  Remote read is never used
 *   6AP.  not affected - worked around in hardware
 *   7AP.  not affected - worked around in hardware
 *   8AP.  worked around in hardware - we get explicit CS errors if not
 *   9AP.  only 'noapic' mode affected. Might generate spurious
 *         interrupts, we log only the first one and count the
 *         rest silently.
 *  10AP.  not affected - worked around in hardware
 *  11AP.  Linux reads the APIC between writes to avoid this, as per
 *         the documentation. Make sure you preserve this as it affects
 *         the C stepping chips too.
 *  12AP.  not affected - worked around in hardware
 *  13AP.  not affected - worked around in hardware
 *  14AP.  we always deassert INIT during bootup
 *  15AP.  not affected - worked around in hardware
 *  16AP.  not affected - worked around in hardware
 *  17AP.  not affected - worked around in hardware
 *  18AP.  not affected - worked around in hardware
 *  19AP.  not affected - worked around in BIOS
 *
 * If this sounds worrying, believe me these bugs are either ___RARE___,
 * or are signal timing bugs worked around in hardware, and there is
 * almost nothing of note from the C stepping upwards.
 */
DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };

/*
 * the following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */
static inline int __prepare_ICR(unsigned int shortcut, int vector)
{
        unsigned int icr = shortcut | APIC_DEST_LOGICAL;

        switch (vector) {
        default:
                icr |= APIC_DM_FIXED | vector;
                break;
        case NMI_VECTOR:
                icr |= APIC_DM_NMI;
                break;
        }
        return icr;
}

static inline int __prepare_ICR2(unsigned int mask)
{
        return SET_APIC_DEST_FIELD(mask);
}
void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
        /*
         * Subtle. In the case of the 'never do double writes' workaround
         * we have to lock out interrupts to be safe. As we don't care
         * about the value read we use an atomic rmw access to avoid
         * costly cli/sti. Otherwise we use an even cheaper single atomic
         * write to the APIC.
         */
        unsigned int cfg;

        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        /*
         * No need to touch the target chip field
         */
        cfg = __prepare_ICR(shortcut, vector);

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        apic_write_around(APIC_ICR, cfg);
}

void send_IPI_self(int vector)
{
        __send_IPI_shortcut(APIC_DEST_SELF, vector);
}
/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
static inline void __send_IPI_dest_field(unsigned long mask, int vector)
{
        unsigned long cfg;

        /*
         * Wait for idle.
         */
        if (unlikely(vector == NMI_VECTOR))
                safe_apic_wait_icr_idle();
        else
                apic_wait_icr_idle();

        /*
         * prepare target chip field
         */
        cfg = __prepare_ICR2(mask);
        apic_write_around(APIC_ICR2, cfg);

        /*
         * program the ICR
         */
        cfg = __prepare_ICR(0, vector);

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        apic_write_around(APIC_ICR, cfg);
}
/*
 * This is only used on smaller machines.
 */
void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
{
        unsigned long mask = cpus_addr(cpumask)[0];
        unsigned long flags;

        local_irq_save(flags);
        WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
        __send_IPI_dest_field(mask, vector);
        local_irq_restore(flags);
}

void send_IPI_mask_sequence(cpumask_t mask, int vector)
{
        unsigned long flags;
        unsigned int query_cpu;

        /*
         * Hack. The clustered APIC addressing mode doesn't allow us to send
         * to an arbitrary mask, so I do a unicast to each CPU instead.
         * This should be modified to do 1 message per cluster ID - mbligh
         */
        local_irq_save(flags);
        for_each_possible_cpu(query_cpu) {
                if (cpu_isset(query_cpu, mask)) {
                        __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
                                              vector);
                }
        }
        local_irq_restore(flags);
}
#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */

/*
 * Smarter SMP flushing macros.
 *	c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway).
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 */

static cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
void leave_mm(int cpu)
{
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                BUG();
        cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *      Stop ipi delivery for the old mm. This is not synchronized with
 *      the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *      for the wrong mm, and in the worst case we perform a superfluous
 *      tlb flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK
 *      Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *      was in lazy tlb mode.
 * 1a3) update cpu_tlbstate[].active_mm
 *      Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *      Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *      cpu_tlbstate[].active_mm is correct, cpu0 already handles
 *      flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *      Atomically set the bit [other cpus will start sending flush ipis],
 *      and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */
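
/*
 * For illustration only (compiled out): a minimal sketch of the 1a) path
 * described above.  The real switch_mm() lives in <asm/mmu_context.h> and
 * differs in detail; switch_mm_sketch() is a hypothetical helper, not a
 * kernel interface.
 */
#if 0
static inline void switch_mm_sketch(struct mm_struct *prev,
                                    struct mm_struct *next, int cpu)
{
        /* 1a1) stop flush ipi delivery for the old mm */
        cpu_clear(cpu, prev->cpu_vm_mask);
        /* 1a2) leave lazy tlb mode */
        per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
        /* 1a3) accept tlb flushes for the new mm */
        per_cpu(cpu_tlbstate, cpu).active_mm = next;
        /* 1a4) other cpus now start sending flush ipis for the new mm */
        cpu_set(cpu, next->cpu_vm_mask);
        /* 1a5) switch the page tables */
        load_cr3(next->pgd);
}
#endif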
/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */

void smp_invalidate_interrupt(struct pt_regs *regs)
{
        unsigned long cpu;

        cpu = get_cpu();

        if (!cpu_isset(cpu, flush_cpumask))
                goto out;
                /*
                 * This was a BUG() but until someone can quote me the
                 * line from the intel manual that guarantees an IPI to
                 * multiple CPUs is retried _only_ on the erroring CPUs
                 * it's staying as a return
                 *
                 * BUG();
                 */

        if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
                if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
                        if (flush_va == TLB_FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
                } else
                        leave_mm(cpu);
        }
        ack_APIC_irq();
        smp_mb__before_clear_bit();
        cpu_clear(cpu, flush_cpumask);
        smp_mb__after_clear_bit();
out:
        put_cpu_no_resched();
        __get_cpu_var(irq_stat).irq_tlb_count++;
}
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
                             unsigned long va)
{
        cpumask_t cpumask = *cpumaskp;

        /*
         * A couple of (to be removed) sanity checks:
         *
         * - current CPU must not be in mask
         * - mask must exist :)
         */
        BUG_ON(cpus_empty(cpumask));
        BUG_ON(cpu_isset(smp_processor_id(), cpumask));
        BUG_ON(!mm);

#ifdef CONFIG_HOTPLUG_CPU
        /* If a CPU which we ran on has gone down, OK. */
        cpus_and(cpumask, cpumask, cpu_online_map);
        if (unlikely(cpus_empty(cpumask)))
                return;
#endif

        /*
         * I'm not happy about this global shared spinlock in the
         * MM hot path, but we'll see how contended it is.
         * AK: x86-64 has a faster method that could be ported.
         */
        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_va = va;
        cpus_or(flush_cpumask, cpumask, flush_cpumask);
        /*
         * We have to send the IPI only to
         * CPUs affected.
         */
        send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

        while (!cpus_empty(flush_cpumask))
                /* nothing. lockup detection does not belong here */
                cpu_relax();

        flush_mm = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}
void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        local_flush_tlb();
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
        preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

        preempt_enable();
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        leave_mm(smp_processor_id());
        }

        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, va);

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

static void do_flush_tlb_all(void *info)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
                leave_mm(cpu);
}

void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
        WARN_ON(cpu_is_offline(cpu));
        send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func)(void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
};

void lock_ipi_call_lock(void)
{
        spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
        spin_unlock_irq(&call_lock);
}

static struct call_data_struct *call_data;
static void __smp_call_function(void (*func)(void *info), void *info,
                                int nonatomic, int wait)
{
        struct call_data_struct data;
        int cpus = num_online_cpus() - 1;

        if (!cpus)
                return;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        call_data = &data;
        mb();

        /* Send a message to all other CPUs and wait for them to respond */
        send_IPI_allbutself(CALL_FUNCTION_VECTOR);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        cpu_relax();
}
/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int
native_smp_call_function_mask(cpumask_t mask,
                              void (*func)(void *), void *info,
                              int wait)
{
        struct call_data_struct data;
        cpumask_t allbutself;
        int cpus;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        /* Holding any lock stops cpus from going down. */
        spin_lock(&call_lock);

        allbutself = cpu_online_map;
        cpu_clear(smp_processor_id(), allbutself);

        cpus_and(mask, mask, allbutself);
        cpus = cpus_weight(mask);

        if (!cpus) {
                spin_unlock(&call_lock);
                return 0;
        }

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        call_data = &data;
        mb();

        /* Send a message to other CPUs */
        if (cpus_equal(mask, allbutself))
                send_IPI_allbutself(CALL_FUNCTION_VECTOR);
        else
                send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        cpu_relax();
        spin_unlock(&call_lock);

        return 0;
}
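
/*
 * For illustration only (compiled out): a hypothetical caller.  Real code
 * would go through the smp_call_function_mask() wrapper declared in the
 * smp header, which dispatches via the smp_ops table at the end of this
 * file; example_func(), example_hits and the target cpu number here are
 * made up for the example.
 */
#if 0
static atomic_t example_hits = ATOMIC_INIT(0);

static void example_func(void *info)
{
        /* runs on every cpu in the mask, from the call-function IPI handler */
        atomic_inc(&example_hits);
}

static void example_caller(void)
{
        cpumask_t mask = cpumask_of_cpu(1);     /* target cpu 1 only */

        /* wait == 1: do not return until example_func() has finished there */
        smp_call_function_mask(mask, example_func, NULL, 1);
}
#endif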
static void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        disable_local_APIC();
        if (cpu_data(smp_processor_id()).hlt_works_ok)
                for (;;) halt();
        for (;;);
}

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
static void native_smp_send_stop(void)
{
        /* Don't deadlock on the call lock in panic */
        int nolock = !spin_trylock(&call_lock);
        unsigned long flags;

        local_irq_save(flags);
        __smp_call_function(stop_this_cpu, NULL, 0, 0);
        if (!nolock)
                spin_unlock(&call_lock);
        disable_local_APIC();
        local_irq_restore(flags);
}
/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
void smp_reschedule_interrupt(struct pt_regs *regs)
{
        ack_APIC_irq();
        __get_cpu_var(irq_stat).irq_resched_count++;
}

void smp_call_function_interrupt(struct pt_regs *regs)
{
        void (*func)(void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        ack_APIC_irq();
        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        mb();
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        irq_enter();
        (*func)(info);
        __get_cpu_var(irq_stat).irq_call_count++;
        irq_exit();

        if (wait) {
                mb();
                atomic_inc(&call_data->finished);
        }
}
static int convert_apicid_to_cpu(int apic_id)
{
        int i;

        for_each_possible_cpu(i) {
                if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
                        return i;
        }
        return -1;
}

int safe_smp_processor_id(void)
{
        int apicid, cpuid;

        if (!boot_cpu_has(X86_FEATURE_APIC))
                return 0;

        apicid = hard_smp_processor_id();
        if (apicid == BAD_APICID)
                return 0;

        cpuid = convert_apicid_to_cpu(apicid);

        return cpuid >= 0 ? cpuid : 0;
}
struct smp_ops smp_ops = {
        .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
        .smp_prepare_cpus = native_smp_prepare_cpus,
        .cpu_up = native_cpu_up,
        .smp_cpus_done = native_smp_cpus_done,

        .smp_send_stop = native_smp_send_stop,
        .smp_send_reschedule = native_smp_send_reschedule,
        .smp_call_function_mask = native_smp_call_function_mask,
};
EXPORT_SYMBOL_GPL(smp_ops);
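
/*
 * For illustration only (compiled out): the generic helpers in the smp
 * header are roughly of this shape, dispatching through the ops table
 * above so that a paravirtualized guest can install its own hooks.  The
 * exact declarations live in the header, not here.
 */
#if 0
static inline void smp_send_reschedule(int cpu)
{
        smp_ops.smp_send_reschedule(cpu);
}

static inline int smp_call_function_mask(cpumask_t mask,
                                         void (*func)(void *), void *info,
                                         int wait)
{
        return smp_ops.smp_call_function_mask(mask, func, info, wait);
}
#endif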