#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"
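
/**
 * irq_move_masked_irq - perform a pending affinity change on a masked irq
 * @idata:	irq_data of the interrupt to move
 *
 * Applies the affinity change recorded in desc->pending_mask via the
 * chip's irq_set_affinity() callback and clears the pending state.
 * The caller must hold desc->lock and have the interrupt masked; see
 * the comment in the body below.
 */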
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = idata->chip;

	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (!irqd_can_balance(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}

	irqd_clr_move_pending(&desc->irq_data);

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in an edge trigger case, we might be setting the RTE
	 * when an active trigger is coming in. This could
	 * cause some ioapics to malfunction.
	 * Being paranoid I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
		   < nr_cpu_ids))
		if (!chip->irq_set_affinity(&desc->irq_data,
					    desc->pending_mask, false)) {
			cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
			irq_set_thread_affinity(desc);
		}

	cpumask_clear(desc->pending_mask);
}
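
/**
 * irq_move_irq - perform a pending affinity change, masking if necessary
 * @idata:	irq_data of the interrupt to move
 *
 * Like irq_move_masked_irq(), but safe to call on an unmasked interrupt:
 * the interrupt is masked around the move if it is not already masked,
 * and its original mask state is preserved to avoid the interrupt storm
 * described in the comment below. Does nothing if no affinity change is
 * pending or the interrupt is disabled.
 */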
void irq_move_irq(struct irq_data *idata)
{
	bool masked;

	if (likely(!irqd_is_setaffinity_pending(idata)))
		return;

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}
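
/*
 * Illustrative sketch, not part of this file: an architecture that defers
 * affinity changes until the interrupt is being handled would typically
 * invoke one of the helpers above from its irq_chip callbacks. The callback
 * and hardware helper names below are hypothetical.
 *
 *	static void example_ack_edge(struct irq_data *data)
 *	{
 *		irq_move_irq(data);
 *		example_hw_ack(data);
 *	}
 *
 * A path that already runs with the interrupt masked and desc->lock held
 * can call irq_move_masked_irq() directly and avoid the extra mask/unmask
 * cycle.
 */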