/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>
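
/*
 * Per-CPU IRQ stacks, allocated in init_IRQ(); the low-level interrupt
 * entry code is expected to switch onto these before handling an IRQ.
 */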
void *irq_stack[NR_CPUS];

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	printk("unexpected IRQ # %d\n", irq);
}

atomic_t irq_err_count;
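
/*
 * Called from the generic /proc/interrupts code to print the
 * architecture-specific counters; here we only report the number of
 * erroneous/spurious interrupts accumulated in irq_err_count.
 */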
int arch_show_interrupts(struct seq_file *p, int prec)
{
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
	return 0;
}
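
/*
 * Typically called by platform interrupt dispatch code when an interrupt
 * exception is taken but no pending source can be identified; all we do
 * is account for the event.
 */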
asmlinkage void spurious_interrupt(void)
{
	atomic_inc(&irq_err_count);
}
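
/*
 * Early interrupt setup: mark every IRQ as unsuitable for autoprobing,
 * mask the CP0 interrupt lines when a vectored external interrupt
 * controller is in use, hand control to the platform's arch_init_irq(),
 * and allocate a dedicated IRQ stack for each possible CPU.
 */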
void __init init_IRQ(void)
{
	int i;
	unsigned int order = get_order(IRQ_STACK_SIZE);

	for (i = 0; i < NR_IRQS; i++)
		irq_set_noprobe(i);

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);

	arch_init_irq();

	for_each_possible_cpu(i) {
		void *s = (void *)__get_free_pages(GFP_KERNEL, order);

		irq_stack[i] = s;
		pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
			 irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
	}
}

#ifdef CONFIG_DEBUG_STACKOVERFLOW
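/*
 * Warn and dump the stack when an interrupt arrives with less than
 * STACK_WARN bytes of the current kernel stack left.
 */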
static inline void check_stack_overflow(void)
{
	unsigned long sp;

	__asm__ __volatile__("move %0, $sp" : "=r" (sp));
	sp &= THREAD_MASK;

	/*
	 * Check for stack overflow: is there less than STACK_WARN free?
	 * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
	 */
	if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
		printk("do_IRQ: stack overflow: %ld\n",
		       sp - sizeof(struct thread_info));
		dump_stack();
	}
}
#else
static inline void check_stack_overflow(void) { }
#endif

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
void __irq_entry do_IRQ(unsigned int irq)
{
	irq_enter();
	check_stack_overflow();
	generic_handle_irq(irq);
	irq_exit();
}