/*
 *	linux/arch/alpha/kernel/irq.c
 *
 *	Copyright (C) 1995 Linus Torvalds
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */
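
/*
 * Illustration only, not from the original file: a driver of this era
 * hooks an interrupt line through the generic request_irq() interface
 * rather than manipulating irq_desc[] directly. The handler, device
 * name, and dev_id below are hypothetical:
 *
 *	static irqreturn_t mydev_isr(int irq, void *dev_id,
 *				     struct pt_regs *regs)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	if (request_irq(irq, mydev_isr, SA_SHIRQ, "mydev", dev))
 *		printk(KERN_ERR "mydev: could not get irq %d\n", irq);
 *
 * Freeing the line uses free_irq(irq, dev) with the same dev_id cookie.
 */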

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/uaccess.h>

volatile unsigned long irq_err_count;

void ack_bad_irq(unsigned int irq)
{
	irq_err_count++;
	printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq);
}
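
/*
 * Note (editorial, grounded in the generic IRQ layer of this era):
 * vectors that fire with no controller attached are routed here by the
 * default no_irq_type "ack" hook, so all this can do is count the event
 * and complain.
 */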

#ifdef CONFIG_SMP
static char irq_user_affinity[NR_IRQS];

int
select_smp_affinity(unsigned int irq)
{
	static int last_cpu;
	int cpu = last_cpu + 1;

	if (!irq_desc[irq].handler->set_affinity || irq_user_affinity[irq])
		return 1;

	while (!cpu_possible(cpu))
		cpu = (cpu < (NR_CPUS - 1) ? cpu + 1 : 0);
	last_cpu = cpu;

	irq_affinity[irq] = cpumask_of_cpu(cpu);
	irq_desc[irq].handler->set_affinity(irq, cpumask_of_cpu(cpu));
	return 0;
}
#endif /* CONFIG_SMP */
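
/*
 * A worked example of the round-robin above (CPU counts hypothetical):
 * on a 4-CPU machine with CPUs 0-3 possible, last_cpu starts at 0, so
 * successive auto-placed IRQs land on CPU 1, 2, 3, 0, 1, ...  Lines
 * whose controller provides no set_affinity hook, or whose affinity
 * the user has already pinned (e.g. via /proc/irq/<n>/smp_affinity,
 * which is what irq_user_affinity[] records), return 1 and are left
 * untouched.
 */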

int
show_interrupts(struct seq_file *p, void *v)
{
#ifdef CONFIG_SMP
	int j;
#endif
	int irq = *(loff_t *) v;
	struct irqaction * action;
	unsigned long flags;

#ifdef CONFIG_SMP
	if (irq == 0) {
		seq_puts(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}
#endif

	if (irq < ACTUAL_NR_IRQS) {
		spin_lock_irqsave(&irq_desc[irq].lock, flags);
		action = irq_desc[irq].action;
		if (!action)
			goto unlock;
		seq_printf(p, "%3d: ", irq);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(irq));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[irq]);
#endif
		seq_printf(p, " %14s", irq_desc[irq].handler->typename);
		seq_printf(p, "  %c%s",
			   (action->flags & SA_INTERRUPT) ? '+' : ' ',
			   action->name);

		for (action = action->next; action; action = action->next) {
			seq_printf(p, ", %c%s",
				   (action->flags & SA_INTERRUPT) ? '+' : ' ',
				   action->name);
		}

		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	} else if (irq == ACTUAL_NR_IRQS) {
#ifdef CONFIG_SMP
		seq_puts(p, "IPI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10lu ", cpu_data[j].ipi_count);
		seq_putc(p, '\n');
#endif
		seq_printf(p, "ERR: %10lu\n", irq_err_count);
	}
	return 0;
}
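
/*
 * For reference, the output this builds in /proc/interrupts looks
 * roughly like the following on a two-CPU box (counts and names are
 * invented; the "+" marks handlers registered with SA_INTERRUPT):
 *
 *	           CPU0       CPU1
 *	  5:       5418       5401          i8259a  +timer
 *	IPI:      12345      12346
 *	ERR:          0
 */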

/*
 * handle_irq handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */

#define MAX_ILLEGAL_IRQS 16

void
handle_irq(int irq, struct pt_regs * regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	static unsigned int illegal_count = 0;

	if ((unsigned) irq > ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS) {
		irq_err_count++;
		illegal_count++;
		printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n",
		       irq);
		return;
	}

	irq_enter();
	/*
	 * __do_IRQ() must be called with IPL_MAX. Note that we do not
	 * explicitly enable interrupts afterwards - some MILO PALcode
	 * (namely LX164 one) seems to have severe problems with RTI
	 * at IPL 0.
	 */
	local_irq_disable();
	__do_IRQ(irq, regs);
	irq_exit();
}
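
/*
 * How control reaches handle_irq(), sketched from the platform side
 * (the hook name and vector decode below are hypothetical; the real
 * ones live in the per-system sys_*.c files): the PALcode interrupt
 * entry lands in do_entInt(), which forwards device interrupts to the
 * alpha_mv.device_interrupt hook, and that hook maps the hardware
 * vector to an irq number before calling here.
 *
 *	static void
 *	mysys_device_interrupt(unsigned long vector, struct pt_regs *regs)
 *	{
 *		int irq = (vector - 0x800) >> 4;
 *		handle_irq(irq, regs);
 *	}
 */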