2005-04-16 15:20:36 -07:00
/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and in the
 * x86_64-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */
# include <linux/kernel_stat.h>
# include <linux/interrupt.h>
# include <linux/seq_file.h>
# include <linux/module.h>
2005-06-25 14:55:00 -07:00
# include <linux/delay.h>
2008-12-09 23:54:20 +01:00
# include <linux/ftrace.h>
2009-01-04 16:25:19 +05:30
# include <linux/uaccess.h>
# include <linux/smp.h>
2005-04-16 15:20:36 -07:00
# include <asm/io_apic.h>
2006-01-11 22:44:36 +01:00
# include <asm/idle.h>
2009-01-23 11:03:29 +09:00
# include <asm/apic.h>
2005-04-16 15:20:36 -07:00
2009-01-19 00:38:57 +09:00
/* Per-CPU interrupt statistics; cacheline-aligned to avoid false sharing. */
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

/* Per-CPU pointer to the register frame of the interrupt being handled. */
DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

/*
 * Sysctl knob (kernel.panic_on_stackoverflow): when non-zero,
 * stack_overflow_check() panics instead of merely warning.
 */
int sysctl_panic_on_stackoverflow;
2006-06-26 14:00:05 +02:00
/*
 * Probabilistic stack overflow check:
 *
 * Only check the stack in process context, because everything else
 * runs on the big interrupt stacks. Checking reliably is too expensive,
 * so we just check from interrupts.
 */
/*
 * Warn (or panic, per sysctl_panic_on_stackoverflow) if the interrupted
 * context's stack pointer is not safely inside one of the three stacks a
 * kernel sp can legitimately point into: the task's process stack, the
 * per-CPU hardirq stack, or one of the per-CPU IST exception stacks.
 */
static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	struct orig_ist *oist;
	u64 irq_stack_top, irq_stack_bottom;
	u64 estack_top, estack_bottom;
	u64 curbase = (u64)task_stack_page(current);

	/* Interrupts taken from user mode land on a fresh kernel stack. */
	if (user_mode_vm(regs))
		return;

	/*
	 * Process stack: sp must lie within the task's stack area and leave
	 * at least thread_info + a pt_regs frame + 128 bytes of headroom
	 * below it.
	 */
	if (regs->sp >= curbase &&
	    regs->sp <= curbase + THREAD_SIZE &&
	    regs->sp >= curbase + sizeof(struct thread_info) +
					sizeof(struct pt_regs) + 128)
		return;

	/* This CPU's hardirq stack: [start of backing array, top-of-stack ptr]. */
	irq_stack_top = (u64)__get_cpu_var(irq_stack_union.irq_stack);
	irq_stack_bottom = (u64)__get_cpu_var(irq_stack_ptr);
	if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom)
		return;

	/*
	 * IST exception stacks: treated as one contiguous range from the
	 * base of the first stack (ist[0] is a top-of-stack pointer, hence
	 * the -EXCEPTION_STKSZ) to the top of the last one.
	 * NOTE(review): assumes the exception stacks are allocated
	 * contiguously and in ist[] order — confirm against cpu setup code.
	 */
	oist = &__get_cpu_var(orig_ist);
	estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ;
	estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
	if (regs->sp >= estack_top && regs->sp <= estack_bottom)
		return;

	WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx)\n",
		current->comm, curbase, regs->sp,
		irq_stack_top, irq_stack_bottom,
		estack_top, estack_bottom);

	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
#endif
}
2006-06-26 14:00:05 +02:00
2009-02-06 14:09:40 -08:00
/*
 * Dispatch one hardware interrupt to its descriptor's handler.
 * Returns false when no descriptor exists for @irq (spurious vector),
 * true once the descriptor has been handled.
 */
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
	struct irq_desc *d;

	/* Cheap debug sanity check on the interrupted context's stack. */
	stack_overflow_check(regs);

	d = irq_to_desc(irq);
	if (unlikely(!d))
		return false;

	generic_handle_irq_desc(irq, d);
	return true;
}
2005-07-28 21:15:49 -07:00
/* Assembly trampoline that runs __do_softirq() on the irq stack. */
extern void call_softirq(void);

/*
 * Process-context entry point for running pending softirqs.
 * No-op when called from interrupt context (softirqs will be run on
 * irq exit instead). Runs with interrupts disabled; call_softirq()
 * switches to the interrupt stack before doing the real work.
 */
asmlinkage void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (local_softirq_pending()) {
		/* Switch to interrupt stack */
		call_softirq();
		/* All softirq processing must have completed by now. */
		WARN_ON_ONCE(softirq_count());
	}
	local_irq_restore(flags);
}