/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>

atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}

#define irq_stats(x)		(&per_cpu(irq_stat, x))
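
/*
 * Usage sketch (illustrative): readers do e.g.
 *
 *	unsigned int n = irq_stats(cpu)->irq_resched_count;
 *
 * while interrupt handlers bump the current cpu's copy through
 * inc_irq_stat(), as smp_x86_platform_ipi() below does.
 */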
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_printf(p, "  IRQ work interrupts\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_printf(p, "  Platform interrupts\n");
	}
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_printf(p, "  Rescheduling interrupts\n");

	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_printf(p, "  Function call interrupts\n");

	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_printf(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_printf(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_printf(p, "  Machine check exceptions\n");

	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_printf(p, "  Machine check polls\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
	return 0;
}
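
/*
 * For reference, the arch-specific rows printed above look roughly like
 * this in /proc/interrupts on a hypothetical two-cpu box (counts are
 * illustrative, not from a real system):
 *
 *	NMI:          0          0   Non-maskable interrupts
 *	LOC:     123456     123789   Local timer interrupts
 *	SPU:          0          0   Spurious interrupts
 *	...
 *	ERR:          0
 *
 * 'prec' is the width the generic /proc/interrupts code chose for the
 * name column, so these rows stay aligned with the numbered-irq rows.
 */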

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
#endif
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
	sum += irq_stats(cpu)->irq_tlb_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);

#ifdef CONFIG_X86_IO_APIC
	sum += atomic_read(&irq_mis_count);
#endif
	return sum;
}
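
/*
 * Note: these two helpers feed the "intr" line of /proc/stat; the generic
 * code (fs/proc/stat.c) adds arch_irq_stat_cpu() for each cpu plus a
 * single arch_irq_stat() on top of the per-irq kstat counters, so the
 * counts summed here should only be ones not visible as numbered irqs.
 */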

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	exit_idle();
	irq_enter();

	irq = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(irq, regs)) {
		ack_APIC_irq();

		if (printk_ratelimit())
			pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
				__func__, smp_processor_id(), vector, irq);
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}
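
/*
 * A sketch of the vector recovery above, assuming the low-level entry
 * stubs leave ~vector in orig_ax (the high bits are reused by the
 * ret_from_ code, hence the complement instead of a plain store).
 * For vector 0x31, say:
 *
 *	regs->orig_ax == ~0x31;			pushed by the entry stub
 *	vector = ~regs->orig_ax;		== 0x31 again
 *	irq = __this_cpu_read(vector_irq[vector]);   per-cpu vector->irq map
 */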

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();

	exit_idle();

	irq_enter();

	inc_irq_stat(x86_platform_ipis);

	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();

	irq_exit();

	set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
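
/*
 * Illustrative only: how a platform driver hooks the IPI above. The
 * handler name below is made up; a real user installs its callback
 * during platform init, before the vector can fire:
 *
 *	static void my_platform_ipi_handler(void)
 *	{
 *		...
 *	}
 *
 *	x86_platform_ipi_callback = my_platform_ipi_handler;
 */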

#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move. This cpu is going down and for
		 * non intr-remapping case, we can't wait till this interrupt
		 * arrives at this cpu before completing the irq move.
		 */
		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_all_mask;
		}

		chip = irq_data_get_irq_chip(data);
		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, affinity, true);
		else if (!(warned++))
			set_affinity = 0;

		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_disabled(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			printk("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			printk("Cannot set affinity for irq %i\n", irq);
	}

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irr;

		if (__this_cpu_read(vector_irq[vector]) < 0)
			continue;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			irq = __this_cpu_read(vector_irq[vector]);

			desc = irq_to_desc(irq);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			raw_spin_lock(&desc->lock);
			if (chip->irq_retrigger)
				chip->irq_retrigger(data);
			raw_spin_unlock(&desc->lock);
		}
	}
}
#endif
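
/*
 * IRR layout note for the retrigger loop above: the 256 vector bits live
 * in eight 32-bit registers spaced 0x10 apart, so vector N sits in
 * register APIC_IRR + (N / 32) * 0x10, at bit (N % 32). E.g. vector 0x31
 * (decimal 49): register APIC_IRR + 0x10, bit 17.
 */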