/*
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and in the
 * x86_64-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/smp.h>

#include <asm/io_apic.h>
#include <asm/idle.h>
#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

/*
 * Probabilistic stack overflow check:
 *
 * Only check when we interrupted process context, because everything
 * else runs on the big interrupt stacks. Checking reliably would be
 * too expensive, so we just sample the stack pointer on interrupt
 * entry.
 */
static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        u64 curbase = (u64)task_stack_page(current);

        WARN_ONCE(regs->sp >= curbase && regs->sp <= curbase + THREAD_SIZE &&
                  regs->sp < curbase + sizeof(struct thread_info) +
                             sizeof(struct pt_regs) + 128,
                  "do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
                  current->comm, curbase, regs->sp);
#endif
}
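
/*
 * Rough picture of the layout the check above assumes (x86_64 of this
 * era, with struct thread_info at the very bottom of the task stack,
 * i.e. at curbase, and the stack growing down towards it):
 *
 *	curbase .. curbase + sizeof(thread_info) + sizeof(pt_regs) + 128
 *		danger zone: WARN_ONCE() fires if regs->sp lands here
 *	.. curbase + THREAD_SIZE
 *		remaining task stack (grows downward from the top)
 *
 * The regs->sp range test also doubles as the "did we interrupt process
 * context?" check: if sp lies outside [curbase, curbase + THREAD_SIZE]
 * the CPU was on an interrupt/exception stack and nothing is reported.
 */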

bool handle_irq(unsigned irq, struct pt_regs *regs)
{
        struct irq_desc *desc;

        stack_overflow_check(regs);

        desc = irq_to_desc(irq);
        if (unlikely(!desc))
                return false;

        generic_handle_irq_desc(irq, desc);
        return true;
}
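
/*
 * For context: handle_irq() is consumed by the common x86 interrupt entry
 * path (do_IRQ(), which lives in irq.c, not in this file). A simplified
 * sketch of such a caller - the name and details are illustrative and
 * differ from the real entry code:
 *
 *	unsigned int example_do_IRQ(struct pt_regs *regs)
 *	{
 *		struct pt_regs *old_regs = set_irq_regs(regs);
 *		unsigned int vector = ~regs->orig_ax;
 *		unsigned int irq = __get_cpu_var(vector_irq)[vector];
 *
 *		irq_enter();
 *		if (!handle_irq(irq, regs))
 *			ack_APIC_irq();	// no descriptor: ack and move on
 *		irq_exit();
 *
 *		set_irq_regs(old_regs);
 *		return 1;
 *	}
 */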

#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irq;
        static int warned;
        struct irq_desc *desc;

        for_each_irq_desc(irq, desc) {
                int break_affinity = 0;
                int set_affinity = 1;
                const struct cpumask *affinity;

                if (!desc)
                        continue;
                if (irq == 2)
                        continue;

                /* interrupts are disabled at this point */
                spin_lock(&desc->lock);

                affinity = desc->affinity;
                if (!irq_has_action(irq) ||
                    cpumask_equal(affinity, cpu_online_mask)) {
                        spin_unlock(&desc->lock);
                        continue;
                }

                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        break_affinity = 1;
                        affinity = cpu_all_mask;
                }

                if (desc->chip->mask)
                        desc->chip->mask(irq);

                if (desc->chip->set_affinity)
                        desc->chip->set_affinity(irq, affinity);
                else if (!(warned++))
                        set_affinity = 0;

                if (desc->chip->unmask)
                        desc->chip->unmask(irq);

                spin_unlock(&desc->lock);

                if (break_affinity && set_affinity)
                        printk("Broke affinity for irq %i\n", irq);
                else if (!set_affinity)
                        printk("Cannot set affinity for irq %i\n", irq);
        }

        /* That doesn't seem sufficient. Give it 1ms. */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif
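
/*
 * Usage note: fixup_irqs() runs with interrupts disabled on the CPU that
 * is going offline, after it has been cleared from cpu_online_mask, from
 * the arch CPU-offline path (not from this file). A minimal sketch of such
 * a caller - the function name is illustrative, not the real smpboot code:
 *
 *	int example_cpu_disable(void)
 *	{
 *		int cpu = smp_processor_id();
 *
 *		// drop out of the online mask first, then repoint any IRQs
 *		// still targeting this CPU at CPUs that remain online
 *		set_cpu_online(cpu, false);
 *		fixup_irqs();
 *		return 0;
 *	}
 *
 * The local_irq_enable()/mdelay(1) window above briefly lets interrupts
 * already heading for this CPU be serviced before it disappears.
 */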

extern void call_softirq(void);

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);
        pending = local_softirq_pending();
        /* Switch to interrupt stack */
        if (pending) {
                call_softirq();
                WARN_ON_ONCE(softirq_count());
        }
        local_irq_restore(flags);
}
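
/*
 * call_softirq() is an assembly stub (in entry_64.S, not this file) that
 * performs the "switch to interrupt stack" mentioned above. In outline -
 * a loose C-flavoured sketch, not the actual assembly:
 *
 *	void call_softirq(void)
 *	{
 *		// save the current frame, then point the stack pointer at
 *		// this CPU's dedicated irq stack so softirq processing does
 *		// not eat into the (possibly already deep) task stack
 *		switch_to_percpu_irq_stack();	// hypothetical helper
 *		__do_softirq();
 *		restore_original_stack();	// hypothetical helper
 *	}
 *
 * __do_softirq() is the generic dispatch loop in kernel/softirq.c; the
 * WARN_ON_ONCE(softirq_count()) afterwards complains if it returned with
 * the softirq part of the preempt count still raised.
 */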