/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

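/* Per-CPU pointer to the saved registers of the interrupt currently being handled. */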
DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
        long sp;

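        /* Mask %esp with THREAD_SIZE - 1 to get its offset within the current stack. */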
        __asm__ __volatile__("andl %%esp,%0" :
                             "=r" (sp) : "0" (THREAD_SIZE - 1));

        return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
        printk(KERN_WARNING "low stack detected by irq handler\n");
        dump_stack();
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
        struct thread_info tinfo;
        u32 stack[THREAD_SIZE / sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

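/*
 * Backing storage for the per-CPU IRQ stacks: one page-aligned,
 * THREAD_SIZE-sized slot per possible CPU, placed in .bss.
 */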
static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
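
/*
 * Build a call frame on @stack and run func() there: %esp is swapped
 * with the supplied stack pointer around the indirect call through %edi,
 * so the callee executes on the passed-in stack and we return on the
 * original one.
 */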
static void call_on_stack(void *func, void *stack)
{
        asm volatile("xchgl %%ebx,%%esp \n"
                     "call *%%edi       \n"
                     "movl %%ebx,%%esp  \n"
                     : "=b" (stack)
                     : "0" (stack),
                       "D" (func)
                     : "memory", "cc", "edx", "ecx", "eax");
}
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
        union irq_ctx *curctx, *irqctx;
        u32 *isp, arg1, arg2;

        curctx = (union irq_ctx *) current_thread_info();
        irqctx = hardirq_ctx[smp_processor_id()];

        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
        if (unlikely(curctx == irqctx))
                return 0;

        /* build the stack frame on the IRQ stack */
        isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
        irqctx->tinfo.task = curctx->tinfo.task;
        irqctx->tinfo.previous_esp = current_stack_pointer;

        /*
         * Copy the softirq bits in preempt_count so that the
         * softirq checks work in the hardirq context.
         */
        irqctx->tinfo.preempt_count =
                (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);

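        /*
         * Switch %esp to the hardirq stack and call the handler there;
         * irq and desc go in %eax and %edx, matching the regparm(3)
         * convention desc->handle_irq is compiled with.
         */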
        asm volatile("xchgl %%ebx,%%esp \n"
                     "call *%%edi       \n"
                     "movl %%ebx,%%esp  \n"
                     : "=a" (arg1), "=d" (arg2), "=b" (isp)
                     : "0" (irq), "1" (desc), "2" (isp),
                       "D" (desc->handle_irq)
                     : "memory", "cc", "ecx");

        return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (hardirq_ctx[cpu])
                return;

        irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task = NULL;
        irqctx->tinfo.exec_domain = NULL;
        irqctx->tinfo.cpu = cpu;
        irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

        hardirq_ctx[cpu] = irqctx;

        irqctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task = NULL;
        irqctx->tinfo.exec_domain = NULL;
        irqctx->tinfo.cpu = cpu;
        irqctx->tinfo.preempt_count = 0;
        irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

        softirq_ctx[cpu] = irqctx;

        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
               cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}
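
/*
 * Drop this CPU's hardirq context pointer; the backing stacks are static
 * arrays, so there is nothing to free.
 */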
void irq_ctx_exit(int cpu)
{
        hardirq_ctx[cpu] = NULL;
}
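
/*
 * Run any pending softirqs on this CPU's dedicated softirq stack instead
 * of on the current task stack, so softirq processing cannot overflow a
 * small (4K) thread stack.
 */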
asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = softirq_ctx[smp_processor_id()];
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_esp = current_stack_pointer;

                /* build the stack frame on the softirq stack */
                isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

                call_on_stack(__do_softirq, isp);
                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}

#else
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
#endif
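
/*
 * Look up the descriptor for this irq and run its handler, switching to
 * the per-CPU hardirq stack when CONFIG_4KSTACKS is enabled. Returns
 * false if no descriptor exists for the irq.
 */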
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
        struct irq_desc *desc;
        int overflow;

        overflow = check_stack_overflow();

        desc = irq_to_desc(irq);
        if (unlikely(!desc))
                return false;

        if (!execute_on_irq_stack(overflow, desc, irq)) {
                if (unlikely(overflow))
                        print_stack_overflow();
                desc->handle_irq(irq, desc);
        }

        return true;
}

#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irq;
        static int warned;
        struct irq_desc *desc;

        for_each_irq_desc(irq, desc) {
                const struct cpumask *affinity;

                if (!desc)
                        continue;
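                /* irq 2 is the 8259 cascade interrupt; leave it alone */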
                if (irq == 2)
                        continue;

                affinity = desc->affinity;
                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        printk("Breaking affinity for irq %i\n", irq);
                        affinity = cpu_all_mask;
                }
                if (desc->chip->set_affinity)
                        desc->chip->set_affinity(irq, affinity);
                else if (desc->action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

#if 0
        barrier();
        /* Ingo Molnar says: "after the IO-APIC masks have been redirected
           [note the nop - the interrupt-enable boundary on x86 is two
           instructions from sti] - to flush out pending hardirqs and
           IPIs. After this point nothing is supposed to reach this CPU." */
        __asm__ __volatile__("sti; nop; cli");
        barrier();
#else
        /* That doesn't seem sufficient. Give it 1ms. */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
#endif
}
#endif