/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>

#include <asm/apic.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE / sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
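
/*
 * Switch %esp to @stack, call @func there and switch back to the
 * original kernel stack once it returns.
 */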
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl %%ebx,%%esp	\n"
		     "call *%%edi	\n"
		     "movl %%ebx,%%esp	\n"
		     : "=b" (stack)
		     : "0" (stack),
		       "D" (func)
		     : "memory", "cc", "edx", "ecx", "eax");
}

static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
	union irq_ctx *curctx, *irqctx;
	u32 *isp, arg1, arg2;

	curctx = (union irq_ctx *) current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (unlikely(curctx == irqctx))
		return 0;

	/* build the stack frame on the IRQ stack */
	isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
	irqctx->tinfo.task = curctx->tinfo.task;
	irqctx->tinfo.previous_esp = current_stack_pointer;

	/*
	 * Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context.
	 */
	irqctx->tinfo.preempt_count =
		(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
		(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);
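
	/*
	 * Run desc->handle_irq(irq, desc) with %esp pointing at the IRQ
	 * stack; irq and desc are passed in %eax/%edx as the regparm
	 * arguments expected by the handler.
	 */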
	asm volatile("xchgl %%ebx,%%esp	\n"
		     "call *%%edi	\n"
		     "movl %%ebx,%%esp	\n"
		     : "=a" (arg1), "=d" (arg2), "=b" (isp)
		     : "0" (irq), "1" (desc), "2" (isp),
		       "D" (desc->handle_irq)
		     : "memory", "cc", "ecx");

	return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= 0;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}
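
/*
 * Run pending softirqs on the dedicated per-cpu softirq stack instead
 * of the (small, 4K) stack of the interrupted task.
 */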
asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = softirq_ctx[smp_processor_id()];
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

		call_on_stack(__do_softirq, isp);

		/*
		 * Shouldn't happen, we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}

#else
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
#endif

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	/* high bit used in ret_from_ code */
	int overflow;
	unsigned vector = ~regs->orig_ax;
	struct irq_desc *desc;
	unsigned irq;

	old_regs = set_irq_regs(regs);
	irq_enter();
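	/* translate the vector the CPU delivered into a Linux irq number */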
	irq = __get_cpu_var(vector_irq)[vector];

	overflow = check_stack_overflow();

	desc = irq_to_desc(irq);
	if (unlikely(!desc)) {
		printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n",
		       __func__, irq, vector, smp_processor_id());
		BUG();
	}
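
	/*
	 * Try to run the handler on the separate hardirq stack; fall back
	 * to the current stack if that is not possible (no 4K stacks, or
	 * we are already running on the irq stack).
	 */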
	if (!execute_on_irq_stack(overflow, desc, irq)) {
		if (unlikely(overflow))
			print_stack_overflow();
		desc->handle_irq(irq, desc);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

#ifdef CONFIG_HOTPLUG_CPU
#include <mach_apic.h>

/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq;
	static int warned;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		const struct cpumask *affinity;

		if (!desc)
			continue;
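
		/* irq 2 is the i8259 cascade; leave it alone */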
2005-06-26 01:54:50 +04:00
		if (irq == 2)
			continue;

		affinity = &desc->affinity;
		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			affinity = cpu_all_mask;
		}

		if (desc->chip->set_affinity)
			desc->chip->set_affinity(irq, affinity);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

#if 0
	barrier();
	/* Ingo Molnar says: "after the IO-APIC masks have been redirected
	   [note the nop - the interrupt-enable boundary on x86 is two
	   instructions from sti] - to flush out pending hardirqs and
	   IPIs. After this point nothing is supposed to reach this CPU." */
	__asm__ __volatile__("sti; nop; cli");
	barrier();
#else
	/* That doesn't seem sufficient.  Give it 1ms. */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
#endif
}
#endif