/*
 *  linux/arch/arm/kernel/irq.c
 *
 *  Copyright (C) 1992 Linus Torvalds
 *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 *  Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
 *  Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
 *  Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This file contains the code used by various IRQ handling routines:
 *  asking for different IRQ's should be done through these routines
 *  instead of just grabbing them. Thus setups with different IRQ numbers
 *  shouldn't result in any weird surprises, and installing new handlers
 *  should be easier.
 *
 *  IRQ's are in fact implemented a bit like signal handlers for the kernel.
 *  Naturally it's not a 1:1 relation, but there are similarities.
 */
# include <linux/kernel_stat.h>
# include <linux/module.h>
# include <linux/signal.h>
# include <linux/ioport.h>
# include <linux/interrupt.h>
2006-07-02 01:30:09 +04:00
# include <linux/irq.h>
2005-04-17 02:20:36 +04:00
# include <linux/random.h>
# include <linux/smp.h>
# include <linux/init.h>
# include <linux/seq_file.h>
# include <linux/errno.h>
# include <linux/list.h>
# include <linux/kallsyms.h>
# include <linux/proc_fs.h>
2010-10-07 19:21:58 +04:00
# include <linux/ftrace.h>
2005-04-17 02:20:36 +04:00
# include <asm/system.h>
2010-12-20 13:18:36 +03:00
# include <asm/mach/arch.h>
2008-08-03 18:04:04 +04:00
# include <asm/mach/irq.h>
2005-06-25 22:39:45 +04:00
# include <asm/mach/time.h>
2005-04-17 02:20:36 +04:00
/*
 * Fallback: platforms that do not define an architecture-specific
 * irq_finish() hook get a no-op so asm_do_IRQ() can call it
 * unconditionally.
 */
#ifndef irq_finish
#define irq_finish(irq) do { } while (0)
#endif
/*
 * Count of bad/erroneous interrupts; printed as the "Err" row by
 * arch_show_interrupts().
 */
unsigned long irq_err_count;
2011-03-24 14:02:11 +03:00
int arch_show_interrupts ( struct seq_file * p , int prec )
2005-04-17 02:20:36 +04:00
{
2009-08-03 18:11:29 +04:00
# ifdef CONFIG_FIQ
2011-03-24 14:02:11 +03:00
show_fiq_list ( p , prec ) ;
2005-04-17 02:20:36 +04:00
# endif
# ifdef CONFIG_SMP
2011-03-24 14:02:11 +03:00
show_ipi_list ( p , prec ) ;
2010-11-15 16:38:06 +03:00
# endif
# ifdef CONFIG_LOCAL_TIMERS
2011-03-24 14:02:11 +03:00
show_local_irqs ( p , prec ) ;
2005-04-17 02:20:36 +04:00
# endif
2011-03-24 14:02:11 +03:00
seq_printf ( p , " %*s: %10lu \n " , prec , " Err " , irq_err_count ) ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
/*
* do_IRQ handles all hardware IRQ ' s . Decoded IRQs should not
* come via this function . Instead , they should provide their
* own ' handler '
*/
2010-10-07 19:21:58 +04:00
asmlinkage void __exception_irq_entry
asm_do_IRQ ( unsigned int irq , struct pt_regs * regs )
2005-04-17 02:20:36 +04:00
{
2006-10-07 00:11:15 +04:00
struct pt_regs * old_regs = set_irq_regs ( regs ) ;
2008-10-09 16:36:24 +04:00
irq_enter ( ) ;
2005-04-17 02:20:36 +04:00
/*
* Some hardware gives randomly wrong interrupts . Rather
* than crashing , do something sensible .
*/
2010-06-25 12:46:09 +04:00
if ( unlikely ( irq > = nr_irqs ) ) {
2009-06-22 12:23:36 +04:00
if ( printk_ratelimit ( ) )
printk ( KERN_WARNING " Bad IRQ%u \n " , irq ) ;
ack_bad_irq ( irq ) ;
} else {
2008-10-09 16:36:24 +04:00
generic_handle_irq ( irq ) ;
2009-06-22 12:23:36 +04:00
}
2005-04-17 02:20:36 +04:00
2006-07-02 01:30:09 +04:00
/* AT91 specific workaround */
2005-04-17 02:20:36 +04:00
irq_finish ( irq ) ;
irq_exit ( ) ;
2006-10-07 00:11:15 +04:00
set_irq_regs ( old_regs ) ;
2005-04-17 02:20:36 +04:00
}
void set_irq_flags ( unsigned int irq , unsigned int iflags )
{
2011-02-08 00:30:49 +03:00
unsigned long clr = 0 , set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN ;
2005-04-17 02:20:36 +04:00
2010-06-25 12:46:09 +04:00
if ( irq > = nr_irqs ) {
2005-04-17 02:20:36 +04:00
printk ( KERN_ERR " Trying to set irq flags for IRQ%d \n " , irq ) ;
return ;
}
2006-07-02 01:30:09 +04:00
if ( iflags & IRQF_VALID )
2011-02-08 00:30:49 +03:00
clr | = IRQ_NOREQUEST ;
2006-07-02 01:30:09 +04:00
if ( iflags & IRQF_PROBE )
2011-02-08 00:30:49 +03:00
clr | = IRQ_NOPROBE ;
2006-07-02 01:30:09 +04:00
if ( ! ( iflags & IRQF_NOAUTOEN ) )
2011-02-08 00:30:49 +03:00
clr | = IRQ_NOAUTOEN ;
/* Order is clear bits in "clr" then set bits in "set" */
irq_modify_status ( irq , clr , set & ~ clr ) ;
2005-04-17 02:20:36 +04:00
}
/* Boot-time IRQ setup: defer entirely to the machine descriptor's hook. */
void __init init_IRQ(void)
{
	machine_desc->init_irq();
}
#ifdef CONFIG_SPARSE_IRQ
/*
 * With sparse IRQs the machine descriptor may supply its own IRQ count;
 * fall back to the static NR_IRQS when it doesn't.
 */
int __init arch_probe_nr_irqs(void)
{
	if (machine_desc->nr_irqs)
		nr_irqs = machine_desc->nr_irqs;
	else
		nr_irqs = NR_IRQS;
	return nr_irqs;
}
#endif
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Re-target a single IRQ onto an online CPU.  Prefer a CPU from the
 * IRQ's existing affinity mask; if none of those are online, pick any
 * online CPU and report the break by returning true.
 */
static bool migrate_one_irq(struct irq_data *d)
{
	unsigned int target = cpumask_any_and(d->affinity, cpu_online_mask);
	bool forced_off_mask = false;

	if (target >= nr_cpu_ids) {
		target = cpumask_any(cpu_online_mask);
		forced_off_mask = true;
	}

	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, target);

	d->chip->irq_set_affinity(d, cpumask_of(target), true);

	return forced_off_mask;
}
2005-11-03 01:24:33 +03:00
/*
* The CPU has been marked offline . Migrate IRQs off this CPU . If
* the affinity settings do not allow other CPUs , force them onto any
* available CPU .
*/
void migrate_irqs ( void )
{
unsigned int i , cpu = smp_processor_id ( ) ;
2010-06-25 12:46:09 +04:00
struct irq_desc * desc ;
2011-01-23 15:09:36 +03:00
unsigned long flags ;
local_irq_save ( flags ) ;
2005-11-03 01:24:33 +03:00
2010-06-25 12:46:09 +04:00
for_each_irq_desc ( i , desc ) {
2010-11-29 12:21:48 +03:00
struct irq_data * d = & desc - > irq_data ;
2011-01-23 15:09:36 +03:00
bool affinity_broken = false ;
2010-11-29 12:21:48 +03:00
2011-01-23 15:09:36 +03:00
raw_spin_lock ( & desc - > lock ) ;
do {
if ( desc - > action = = NULL )
break ;
2005-11-03 01:24:33 +03:00
2011-01-23 15:09:36 +03:00
if ( d - > node ! = cpu )
break ;
2005-11-03 01:24:33 +03:00
2011-01-23 15:09:36 +03:00
affinity_broken = migrate_one_irq ( d ) ;
} while ( 0 ) ;
raw_spin_unlock ( & desc - > lock ) ;
if ( affinity_broken & & printk_ratelimit ( ) )
pr_warning ( " IRQ%u no longer affine to CPU%u \n " , i , cpu ) ;
2005-11-03 01:24:33 +03:00
}
2011-01-23 15:09:36 +03:00
local_irq_restore ( flags ) ;
2005-11-03 01:24:33 +03:00
}
# endif /* CONFIG_HOTPLUG_CPU */