#ifndef __ASM_IPI_H
#define __ASM_IPI_H

/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Generic APIC InterProcessor Interrupt code.
 *
 * Moved to include file by James Cleverdon from
 * arch/x86-64/kernel/smp.c
 *
 * Copyrights from kernel/smp.c:
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 * (c) 2002, 2003 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License, v.2
 */

#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/smp.h>

/*
 * The following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */

static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector,
					 unsigned int dest)
{
	unsigned int icr = shortcut | dest;

	switch (vector) {
	default:
		icr |= APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr |= APIC_DM_NMI;
		break;
	}
	return icr;
}
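
/*
 * Worked example (illustration only, not part of the original header):
 * for anything other than NMI_VECTOR the helper above simply ORs its
 * pieces together, so a fixed-delivery, logical-mode broadcast request
 * would evaluate as
 *
 *	__prepare_ICR(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL)
 *		== APIC_DEST_ALLINC | APIC_DEST_LOGICAL
 *		   | APIC_DM_FIXED | vector
 *
 * APIC_DEST_ALLINC and APIC_DEST_LOGICAL are the shorthand and
 * destination-mode constants from <asm/apicdef.h>; the call site itself
 * is hypothetical.
 */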

static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}

static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
				       unsigned int dest)
{
	/*
	 * Subtle.  In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe.  As we don't care
	 * about the value read, we use an atomic rmw access to avoid
	 * costly cli/sti.  Otherwise we use an even cheaper single
	 * atomic write to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * No need to touch the target chip field.
	 */
	cfg = __prepare_ICR(shortcut, vector, dest);

	/*
	 * Send the IPI.  The write to APIC_ICR fires this off.
	 */
	apic_write(APIC_ICR, cfg);
}
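
/*
 * Illustrative sketch only (an assumption, not part of this header): a
 * flat-mode APIC driver would typically broadcast with a shorthand
 * rather than a destination field, e.g. an all-but-self IPI in logical
 * mode.  The example_* wrapper below is hypothetical; APIC_DEST_ALLBUT
 * and APIC_DEST_LOGICAL come from <asm/apicdef.h>.
 */
static inline void example_send_IPI_allbutself_flat(int vector)
{
	__send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
}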

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
static inline void __send_IPI_dest_field(unsigned int mask, int vector,
					 unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		apic_wait_icr_idle();

	/*
	 * Prepare target chip field.
	 */
	cfg = __prepare_ICR2(mask);
	apic_write(APIC_ICR2, cfg);

	/*
	 * Program the ICR.
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI.  The write to APIC_ICR fires this off.
	 */
	apic_write(APIC_ICR, cfg);
}
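
/*
 * Illustrative sketch only (the example_* helper is hypothetical): to
 * target one specific CPU in physical mode, look up its APIC ID and pass
 * it as the destination field, with interrupts blocked around the ICR
 * writes.  This mirrors what send_IPI_mask_sequence() below does for
 * each CPU in the mask.
 */
static inline void example_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
			      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}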

static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
{
	unsigned long flags;
	unsigned long query_cpu;

	/*
	 * Hack.  The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu_mask_nr(query_cpu, mask) {
		__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
				      vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}
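
/*
 * Illustrative sketch only (the example_* helper is hypothetical): a
 * physical-mode "send to all but self" can be built on top of
 * send_IPI_mask_sequence() by dropping the current CPU from the online
 * map first.  cpu_online_map, cpu_clear() and cpus_empty() are the
 * cpumask primitives assumed to be available to callers of this header.
 */
static inline void example_send_IPI_allbutself_phys(int vector)
{
	cpumask_t mask = cpu_online_map;

	cpu_clear(smp_processor_id(), mask);
	if (!cpus_empty(mask))
		send_IPI_mask_sequence(mask, vector);
}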

#endif /* __ASM_IPI_H */