/*
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
        long sp;

        __asm__ __volatile__("andl %%esp,%0" :
                             "=r" (sp) : "0" (THREAD_SIZE - 1));

        return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
        printk(KERN_WARNING "low stack detected by irq handler\n");
        dump_stack();
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
        struct thread_info      tinfo;
        u32                     stack[THREAD_SIZE/sizeof(u32)];
} __attribute__((aligned(THREAD_SIZE)));
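
/*
 * The THREAD_SIZE alignment above keeps current_thread_info(), which
 * masks the stack pointer with ~(THREAD_SIZE - 1), working while we
 * are running on one of these stacks.
 */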

static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
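
/*
 * Call func() with the stack pointer switched to @stack: the old %esp
 * is parked in %ebx, func is called through %edi, and the original
 * stack is restored on return. func must take no arguments.
 */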
static void call_on_stack(void *func, void *stack)
{
        asm volatile("xchgl     %%ebx,%%esp     \n"
                     "call      *%%edi          \n"
                     "movl      %%ebx,%%esp     \n"
                     : "=b" (stack)
                     : "0" (stack),
                       "D"(func)
                     : "memory", "cc", "edx", "ecx", "eax");
}
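
/*
 * Switch to this CPU's hardirq stack and invoke desc->handle_irq(irq,
 * desc) there; irq rides in %eax and desc in %edx, matching the 32-bit
 * kernel's regparm(3) calling convention. Returns 1 after running the
 * handler, or 0 if we were already on the irq stack, in which case the
 * caller must run the handler on the current stack instead.
 */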
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
        union irq_ctx *curctx, *irqctx;
        u32 *isp, arg1, arg2;

        curctx = (union irq_ctx *) current_thread_info();
        irqctx = __this_cpu_read(hardirq_ctx);

        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
        if (unlikely(curctx == irqctx))
                return 0;

        /* build the stack frame on the IRQ stack */
        isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
        irqctx->tinfo.task = curctx->tinfo.task;
        irqctx->tinfo.previous_esp = current_stack_pointer;

        /*
         * Copy the softirq bits in preempt_count so that the
         * softirq checks work in the hardirq context.
         */
        irqctx->tinfo.preempt_count =
                (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);

        asm volatile("xchgl     %%ebx,%%esp     \n"
                     "call      *%%edi          \n"
                     "movl      %%ebx,%%esp     \n"
                     : "=a" (arg1), "=d" (arg2), "=b" (isp)
                     :  "0" (irq),   "1" (desc),  "2" (isp),
                        "D" (desc->handle_irq)
                     : "memory", "cc", "ecx");
        return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (per_cpu(hardirq_ctx, cpu))
                return;

        irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
                                               THREAD_FLAGS,
                                               THREAD_ORDER));
        memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        per_cpu(hardirq_ctx, cpu) = irqctx;

        irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
                                               THREAD_FLAGS,
                                               THREAD_ORDER));
        memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        per_cpu(softirq_ctx, cpu) = irqctx;

        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
               cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}
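
/*
 * Process pending softirqs on this CPU's softirq stack. A no-op when
 * called in interrupt context; softirqs raised there are run on the
 * way out of the hardirq instead.
 */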
asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = __this_cpu_read(softirq_ctx);
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_esp = current_stack_pointer;

                /* build the stack frame on the softirq stack */
                isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

                call_on_stack(__do_softirq, isp);
                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}
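
/*
 * Called from the arch do_IRQ() path: look up the descriptor for the
 * vector and run its handler on the hardirq stack, falling back to the
 * current stack when we already live on it. Returns false if no
 * descriptor exists for the irq.
 */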
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
        struct irq_desc *desc;
        int overflow;

        overflow = check_stack_overflow();

        desc = irq_to_desc(irq);
        if (unlikely(!desc))
                return false;

        if (!execute_on_irq_stack(overflow, desc, irq)) {
                if (unlikely(overflow))
                        print_stack_overflow();
                desc->handle_irq(irq, desc);
        }

        return true;
}