#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

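/*
 * move_masked_irq - program a pending affinity change for a masked irq
 * @irq:	interrupt number
 *
 * If an affinity change has been queued in desc->pending_mask, hand it
 * to the irq chip and update desc->irq_data.affinity accordingly. The
 * caller must hold desc->lock and must have masked the interrupt line.
 */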
void move_masked_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

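	/* Nothing to do unless an affinity change is pending for this irq. */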
	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		WARN_ON(1);
		return;
	}

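	/* The move is handled (or dropped) below, so clear the pending flag up front. */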
	desc->status &= ~IRQ_MOVE_PENDING;

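	/* An empty pending mask leaves nothing to program. */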
	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

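	/* The irq chip has to support setting the affinity at all. */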
	if (!desc->irq_data.chip->set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there is a valid mask to work with, do the
	 * disable, re-program, enable sequence.
	 * This is *not* particularly important for level-triggered
	 * interrupts, but in the edge-triggered case we might be
	 * setting the RTE while an active trigger is coming in.
	 * That could cause some IO-APICs to malfunction.
	 * Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
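	/*
	 * Pick a target that is actually online and only commit pending_mask
	 * to irq_data.affinity if the chip accepts the new setting.
	 */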
	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
		   < nr_cpu_ids))
		if (!desc->irq_data.chip->set_affinity(irq, desc->pending_mask)) {
			cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
			irq_set_thread_affinity(desc);
		}

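	/* The pending mask has been consumed (or was unusable); clear it either way. */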
	cpumask_clear(desc->pending_mask);
}

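/*
 * move_native_irq - carry out a pending affinity change for an unmasked irq
 * @irq:	interrupt number
 *
 * Mask the interrupt, let move_masked_irq() reprogram the affinity and
 * unmask it again. A disabled interrupt is left alone and the move
 * stays pending.
 */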
void move_native_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

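	/* Don't touch a disabled irq; the move stays pending. */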
	if (unlikely(desc->status & IRQ_DISABLED))
		return;

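	/* Mask around the actual move, as move_masked_irq() requires. */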
	desc->irq_data.chip->irq_mask(&desc->irq_data);
	move_masked_irq(irq);
	desc->irq_data.chip->irq_unmask(&desc->irq_data);
}