/*
 * linux/kernel/irq/spurious.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"

static int irqfixup __read_mostly;

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
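/*
 * Only one CPU may poll at a time: irq_poll_active serializes the
 * pollers, and irq_poll_cpu records which CPU is polling so that
 * irq_wait_for_poll() can detect a poll recursing on the same CPU.
 */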
static int irq_poll_cpu;
static atomic_t irq_poll_active;

/*
 * We wait here for a poller to finish.
 *
 * If the poll runs on this CPU, then we yell loudly and return
 * false. That will leave the interrupt line disabled in the worst
 * case, but it should never happen.
 *
 * We wait until the poller is done and then recheck disabled and
 * action (about to be disabled). Only if it's still active, we return
 * true and let the handler run.
 */
bool irq_wait_for_poll(struct irq_desc *desc)
{
	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
		      "irq poll in progress on cpu %d for irq %d\n",
		      smp_processor_id(), desc->irq_data.irq))
		return false;

#ifdef CONFIG_SMP
	do {
		raw_spin_unlock(&desc->lock);
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();
		raw_spin_lock(&desc->lock);
	} while (irqd_irq_inprogress(&desc->irq_data));
	/* Might have been disabled in meantime */
	return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
	return false;
#endif
}
2011-02-07 03:29:15 +03:00
2005-06-29 07:45:18 +04:00
/*
* Recovery handler for misrouted interrupts .
*/
2015-06-23 21:07:35 +03:00
static int try_one_irq ( struct irq_desc * desc , bool force )
2005-06-29 07:45:18 +04:00
{
2011-02-07 03:29:15 +03:00
irqreturn_t ret = IRQ_NONE ;
2008-07-11 01:48:54 +04:00
struct irqaction * action ;
2005-06-29 07:45:18 +04:00
2009-11-17 18:46:45 +03:00
raw_spin_lock ( & desc - > lock ) ;
2011-02-07 11:52:27 +03:00
2013-11-06 15:30:07 +04:00
/*
* PER_CPU , nested thread interrupts and interrupts explicitely
* marked polled are excluded from polling .
*/
if ( irq_settings_is_per_cpu ( desc ) | |
irq_settings_is_nested_thread ( desc ) | |
irq_settings_is_polled ( desc ) )
2011-02-07 11:52:27 +03:00
goto out ;
/*
* Do not poll disabled interrupts unless the spurious
* disabled poller asks explicitely .
*/
2011-03-28 16:10:52 +04:00
if ( irqd_irq_disabled ( & desc - > irq_data ) & & ! force )
2011-02-07 11:52:27 +03:00
goto out ;
/*
* All handlers must agree on IRQF_SHARED , so we test just the
2012-11-23 13:08:44 +04:00
* first .
2011-02-07 11:52:27 +03:00
*/
action = desc - > action ;
if ( ! action | | ! ( action - > flags & IRQF_SHARED ) | |
2012-11-23 13:08:44 +04:00
( action - > flags & __IRQF_TIMER ) )
2011-02-07 11:52:27 +03:00
goto out ;
2008-07-11 01:48:54 +04:00
/* Already running on another processor */
2011-03-28 16:10:52 +04:00
if ( irqd_irq_inprogress ( & desc - > irq_data ) ) {
2008-07-11 01:48:54 +04:00
/*
* Already running : If it is shared get the other
* CPU to go looking for our mystery interrupt too
*/
2011-02-08 14:17:57 +03:00
desc - > istate | = IRQS_PENDING ;
2011-02-07 11:10:39 +03:00
goto out ;
2011-02-07 11:52:27 +03:00
}
2011-02-07 11:10:39 +03:00
2011-02-07 03:29:15 +03:00
/* Mark it poll in progress */
2011-02-07 22:55:35 +03:00
desc - > istate | = IRQS_POLL_INPROGRESS ;
2011-02-07 11:10:39 +03:00
do {
2011-02-07 03:29:15 +03:00
if ( handle_irq_event ( desc ) = = IRQ_HANDLED )
ret = IRQ_HANDLED ;
2012-11-23 13:08:44 +04:00
/* Make sure that there is still a valid action */
2011-02-07 11:10:39 +03:00
action = desc - > action ;
2011-02-08 14:17:57 +03:00
} while ( ( desc - > istate & IRQS_PENDING ) & & action ) ;
2011-02-07 22:55:35 +03:00
desc - > istate & = ~ IRQS_POLL_INPROGRESS ;
2011-02-07 11:10:39 +03:00
out :
raw_spin_unlock ( & desc - > lock ) ;
2011-02-07 03:29:15 +03:00
return ret = = IRQ_HANDLED ;
2008-07-11 01:48:54 +04:00
}
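/*
 * Called for an unhandled interrupt: poll every other interrupt line
 * once (skipping irq 0 and the line that just misfired) and report
 * whether any handler claimed an interrupt, so the caller can adjust
 * its unhandled-interrupt count.
 */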
static int misrouted_irq(int irq)
{
	struct irq_desc *desc;
	int i, ok = 0;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;

	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		if (!i)
			continue;

		if (i == irq)	/* Already tried */
			continue;

		if (try_one_irq(desc, false))
			ok = 1;
	}
out:
	atomic_dec(&irq_poll_active);
	/* So the caller can adjust the irq error counts */
	return ok;
}
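/*
 * Timer callback: periodically re-poll the lines that the spurious
 * detector disabled (IRQS_SPURIOUS_DISABLED), so devices stuck behind
 * such a line can still make progress, then rearm the timer.
 */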
static void poll_spurious_irqs(unsigned long dummy)
{
	struct irq_desc *desc;
	int i;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;
	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		unsigned int state;

		if (!i)
			continue;

		/* Racy but it doesn't matter */
		state = desc->istate;
		barrier();
		if (!(state & IRQS_SPURIOUS_DISABLED))
			continue;

		local_irq_disable();
		try_one_irq(desc, true);
		local_irq_enable();
	}
out:
	atomic_dec(&irq_poll_active);
	mod_timer(&poll_spurious_irq_timer,
		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}
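/*
 * A sane handler return is a bitwise combination of IRQ_NONE,
 * IRQ_HANDLED and IRQ_WAKE_THREAD; any value above that mask is
 * bogus and gets reported as such.
 */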
static inline int bad_action_ret(irqreturn_t action_ret)
{
	unsigned int r = action_ret;

	if (likely(r <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
		return 0;
	return 1;
}
/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly
 *  functioning device sharing an IRQ with the failing one)
 */
static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct irqaction *action;
	unsigned long flags;

	if (bad_action_ret(action_ret)) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared (try booting with "
				"the \"irqpoll\" option)\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");

	/*
	 * We need to take desc->lock here. note_interrupt() is called
	 * w/o desc->lock held, but IRQ_PROGRESS set. We might race
	 * with something else removing an action. It's ok to take
	 * desc->lock here. See synchronize_irq().
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
		if (action->thread_fn)
			printk(KERN_CONT " threaded [<%p>] %pf",
					action->thread_fn, action->thread_fn);
		printk(KERN_CONT "\n");
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count > 0) {
		count--;
		__report_bad_irq(desc, action_ret);
	}
}
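/*
 * Decide, based on the "irqfixup"/"irqpoll" boot options, whether
 * this interrupt warrants a misrouted-IRQ poll: always for unhandled
 * interrupts when irqfixup is active, and additionally for handled
 * ones marked IRQF_IRQPOLL (or irq 0) in irqpoll mode.
 */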
static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
		  irqreturn_t action_ret)
{
	struct irqaction *action;

	if (!irqfixup)
		return 0;

	/* We didn't actually handle the IRQ - see if it was misrouted? */
	if (action_ret == IRQ_NONE)
		return 1;

	/*
	 * But for 'irqfixup == 2' we also do it for handled interrupts if
	 * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
	 * traditional PC timer interrupt.. Legacy)
	 */
	if (irqfixup < 2)
		return 0;

	if (!irq)
		return 1;

	/*
	 * Since we don't get the descriptor lock, "action" can
	 * change under us. We don't really care, but we don't
	 * want to follow a NULL pointer. So tell the compiler to
	 * just load it once by using a barrier.
	 */
	action = desc->action;
	barrier();
	return action && (action->flags & IRQF_IRQPOLL);
}
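/*
 * Bit 31 of threads_handled_last flags that spurious detection for a
 * threaded handler has been deferred to the next hardware interrupt.
 */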
#define SPURIOUS_DEFERRED	0x80000000
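/*
 * Bookkeeping for every hard interrupt: called by the core interrupt
 * handling code with the combined return value of all handlers on
 * this line, it tracks unhandled interrupts, optionally triggers the
 * misrouted-IRQ poll, and disables a line that went unhandled 99,900
 * times out of the last 100,000.
 */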
void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
{
	unsigned int irq;

	if (desc->istate & IRQS_POLL_INPROGRESS ||
	    irq_settings_is_polled(desc))
		return;

	if (bad_action_ret(action_ret)) {
		report_bad_irq(desc, action_ret);
		return;
	}

	/*
	 * We cannot call note_interrupt from the threaded handler
	 * because we need to look at the compound of all handlers
	 * (primary and threaded). Aside from that, in the threaded
	 * shared case we have no serialization against an incoming
	 * hardware interrupt while we are dealing with a threaded
	 * result.
	 *
	 * So in case a thread is woken, we just note the fact and
	 * defer the analysis to the next hardware interrupt.
	 *
	 * The threaded handlers store whether they successfully
	 * handled an interrupt and we check whether that number
	 * changed versus the last invocation.
	 *
	 * We could handle all interrupts with the delayed by one
	 * mechanism, but for the non forced threaded case we'd just
	 * add pointless overhead to the straight hardirq interrupts
	 * for the sake of a few lines less code.
	 */
	if (action_ret & IRQ_WAKE_THREAD) {
		/*
		 * There is a thread woken. Check whether one of the
		 * shared primary handlers returned IRQ_HANDLED. If
		 * not we defer the spurious detection to the next
		 * interrupt.
		 */
		if (action_ret == IRQ_WAKE_THREAD) {
			int handled;
			/*
			 * We use bit 31 of threads_handled_last to
			 * denote the deferred spurious detection
			 * active. No locking necessary as
			 * threads_handled_last is only accessed here
			 * and we have the guarantee that hard
			 * interrupts are not reentrant.
			 */
			if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
				desc->threads_handled_last |= SPURIOUS_DEFERRED;
				return;
			}
			/*
			 * Check whether one of the threaded handlers
			 * returned IRQ_HANDLED since the last
			 * interrupt happened.
			 *
			 * For simplicity we just set bit 31, as it is
			 * set in threads_handled_last as well. So we
			 * avoid extra masking. And we really do not
			 * care about the high bits of the handled
			 * count. We just care about the count being
			 * different than the one we saw before.
			 */
			handled = atomic_read(&desc->threads_handled);
			handled |= SPURIOUS_DEFERRED;
			if (handled != desc->threads_handled_last) {
				action_ret = IRQ_HANDLED;
				/*
				 * Note: We keep the SPURIOUS_DEFERRED
				 * bit set. We are handling the
				 * previous invocation right now.
				 * Keep it for the current one, so the
				 * next hardware interrupt will
				 * account for it.
				 */
				desc->threads_handled_last = handled;
			} else {
				/*
				 * None of the threaded handlers felt
				 * responsible for the last interrupt.
				 *
				 * We keep the SPURIOUS_DEFERRED bit
				 * set in threads_handled_last as we
				 * need to account for the current
				 * interrupt as well.
				 */
				action_ret = IRQ_NONE;
			}
		} else {
			/*
			 * One of the primary handlers returned
			 * IRQ_HANDLED. So we don't care about the
			 * threaded handlers on the same line. Clear
			 * the deferred detection bit.
			 *
			 * In theory we could/should check whether the
			 * deferred bit is set and take the result of
			 * the previous run into account here as
			 * well. But it's really not worth the
			 * trouble. If every other interrupt is
			 * handled we never trigger the spurious
			 * detector. And if this is just the one out
			 * of 100k unhandled ones which is handled
			 * then we merely delay the spurious detection
			 * by one hard interrupt. Not a real problem.
			 */
			desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
		}
	}

	if (unlikely(action_ret == IRQ_NONE)) {
		/*
		 * If we are seeing only the odd spurious IRQ caused by
		 * bus asynchronicity then don't eventually trigger an error,
		 * otherwise the counter becomes a doomsday timer for otherwise
		 * working systems
		 */
		if (time_after(jiffies, desc->last_unhandled + HZ/10))
			desc->irqs_unhandled = 1;
		else
			desc->irqs_unhandled++;
		desc->last_unhandled = jiffies;
	}

	irq = irq_desc_get_irq(desc);
	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
		int ok = misrouted_irq(irq);
		if (action_ret == IRQ_NONE)
			desc->irqs_unhandled -= ok;
	}

	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->istate |= IRQS_SPURIOUS_DISABLED;
		desc->depth++;
		irq_disable(desc);

		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}
	desc->irqs_unhandled = 0;
}
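/*
 * Set by the "noirqdebug" boot option or via the module parameter
 * below; disables IRQ lockup detection.
 */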
bool noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk(KERN_INFO "IRQ lockup detection disabled\n");

	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
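/*
 * "irqfixup" (irqfixup = 1) polls for misrouted interrupts when one
 * goes unhandled; "irqpoll" (irqfixup = 2) additionally polls on
 * handled IRQF_IRQPOLL interrupts and on irq 0. See
 * try_misrouted_irq() above.
 */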
static int __init irqfixup_setup(char *str)
{
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");

	return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);

static int __init irqpoll_setup(char *str)
{
	irqfixup = 2;
	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
				"enabled\n");
	printk(KERN_WARNING "This may significantly impact system "
				"performance\n");
	return 1;
}

__setup("irqpoll", irqpoll_setup);