#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"
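
/*
 * move_masked_irq - Reprogram a masked interrupt's affinity from pending_mask
 * @irq:	Interrupt number
 *
 * Pushes desc->pending_mask to the irq chip via its irq_set_affinity()
 * callback. The caller must hold desc->lock and must have masked the
 * interrupt; see the comment in the body about edge-triggered interrupts.
 */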
void move_masked_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_chip *chip = desc->irq_data.chip;

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		WARN_ON(1);
		return;
	}

	desc->status &= ~IRQ_MOVE_PENDING;

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);
	/*
	 * If there is a valid mask to work with, do the
	 * disable, re-program, enable sequence.
	 * This is *not* particularly important for level-triggered
	 * interrupts, but in the edge-triggered case we might be
	 * setting the RTE while an active trigger is coming in.
	 * This could cause some IO-APICs to malfunction.
	 * Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
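	/* Reprogram only if at least one pending target CPU is online. */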
	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
		   < nr_cpu_ids))
		if (!chip->irq_set_affinity(&desc->irq_data,
					    desc->pending_mask, false)) {
			cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
			irq_set_thread_affinity(desc);
		}

	cpumask_clear(desc->pending_mask);
}
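
/*
 * move_native_irq - Move a pending interrupt, masking it around the move
 * @irq:	Interrupt number
 *
 * Nothing to do for disabled interrupts or when no move is pending.
 * Otherwise the interrupt is masked (unless it already is), handed to
 * move_masked_irq() and unmasked again afterwards, to avoid an interrupt
 * storm with ONESHOT threaded handlers.
 */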
void move_native_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	bool masked;

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	if (unlikely(desc->status & IRQ_DISABLED))
		return;
	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = desc->status & IRQ_MASKED;
	if (!masked)
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	move_masked_irq(irq);
	if (!masked)
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
}