/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>

atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled - AK
	 */
	ack_APIC_irq();
}
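
/* Shorthand for one cpu's irq_cpustat_t counters */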
#define irq_stats(x)		(&per_cpu(irq_stat, x))

/*
 * /proc/interrupts printing:
 */
static int show_other_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");
	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_printf(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_printf(p, "  IRQ work interrupts\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_printf(p, "  Platform interrupts\n");
	}
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_printf(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_printf(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_printf(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_printf(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_printf(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_printf(p, "  Machine check polls\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
	return 0;
}

int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j, prec;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > nr_irqs)
		return 0;

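	/*
	 * Field width for the irq number column: at least 3 digits,
	 * widened while nr_irqs has more digits, capped at 10.
	 */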
	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
		j *= 10;

	if (i == nr_irqs)
		return show_other_interrupts(p, prec);

	/* print header */
	if (i == 0) {
		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	desc = irq_to_desc(i);
	if (!desc)
		return 0;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	if (!action && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
	seq_printf(p, " %8s", desc->irq_data.chip->name);
	seq_printf(p, "-%-8s", desc->name);

	if (action) {
		seq_printf(p, "  %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

/*
 * /proc/stat helpers
 */
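/* Sum of all interrupts handled on this cpu, reported via /proc/stat */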
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
#endif
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
	sum += irq_stats(cpu)->irq_tlb_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);

#ifdef CONFIG_X86_IO_APIC
	sum += atomic_read(&irq_mis_count);
#endif
	return sum;
}

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * The entry stub pushes the vector one's-complemented, so the
	 * high bit of orig_ax is set for hardware interrupts and can be
	 * used in the ret_from_ code; undo the complement here.
	 */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	exit_idle();
	irq_enter();

	irq = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(irq, regs)) {
		ack_APIC_irq();

		if (printk_ratelimit())
			pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
				__func__, smp_processor_id(), vector, irq);
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	inc_irq_stat(x86_platform_ipis);

	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();

	irq_exit();

	set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_OF
/*
 * x86 uses a flat irq number space, so an OF interrupt specifier
 * maps one-to-one onto the Linux irq number.
 */
unsigned int irq_create_of_mapping(struct device_node *controller,
		const u32 *intspec, unsigned int intsize)
{
	return intspec[0];
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
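
		/* irq 2 is the 8259 cascade; leave it alone */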
		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		raw_spin_lock(&desc->lock);

		data = &desc->irq_data;
		affinity = data->affinity;
		if (!irq_has_action(irq) ||
		    cpumask_equal(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move.  This cpu is going down; in the
		 * non-intr-remapping case we can't wait until this interrupt
		 * arrives here before completing the move.
		 */
		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_all_mask;
		}

		if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask)
			data->chip->irq_mask(data);

		if (data->chip->irq_set_affinity)
			data->chip->irq_set_affinity(data, affinity, true);
		else if (!(warned++))
			set_affinity = 0;

		if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask)
			data->chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}

	/*
	 * We could drop the mdelay() and simply send spurious interrupts
	 * to the new cpu targets for all irqs that were previously handled
	 * by this cpu.  While it works, I have seen spurious interrupt
	 * messages (nothing wrong, but still...).
	 *
	 * So for now, retain mdelay(1): check the IRR and then send those
	 * interrupts to their new targets, as this cpu is already offlined.
	 */
	mdelay(1);

	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irr;

		if (__this_cpu_read(vector_irq[vector]) < 0)
			continue;

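		/*
		 * The local APIC IRR is a 256-bit bitmap spread over eight
		 * 32-bit registers spaced 0x10 apart; test the pending bit
		 * for this vector.
		 */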
		irr = apic_read(APIC_IRR + (vector/32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			irq = __this_cpu_read(vector_irq[vector]);

			data = irq_get_irq_data(irq);
			raw_spin_lock(&desc->lock);
			if (data->chip->irq_retrigger)
				data->chip->irq_retrigger(data);
			raw_spin_unlock(&desc->lock);
		}
	}
}
#endif