/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 by Waldorf GMBH, written by Ralf Baechle
 * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 */
# ifndef _ASM_IRQ_H
# define _ASM_IRQ_H
#include <linux/linkage.h>

#include <asm/mipsmtregs.h>

#include <irq.h>
#ifdef CONFIG_I8259
/*
 * On i8259-style interrupt controllers the cascade input (base + 2) is
 * not a usable interrupt line; requests arriving on it are reported as
 * base + 9, so map the former to the latter.
 */
static inline int irq_canonicalize(int irq)
{
	return ((irq == I8259A_IRQ_BASE + 2) ? I8259A_IRQ_BASE + 9 : irq);
}
#else
#define irq_canonicalize(irq) (irq)	/* Sane hardware, sane code ... */
#endif
#ifdef CONFIG_MIPS_MT_SMTC

struct irqaction;

extern unsigned long irq_hwmask[];
extern int setup_irq_smtc(unsigned int irq, struct irqaction *new,
			  unsigned long hwmask);

/*
 * Acknowledge an IRQ under SMTC: if the irq_hwmask[] entry for this
 * interrupt records Status.IM bits, set them again in c0_status.
 */
static inline void smtc_im_ack_irq(unsigned int irq)
{
	if (irq_hwmask[irq] & ST0_IM)
		set_c0_status(irq_hwmask[irq] & ST0_IM);
}

#else

/* Non-SMTC kernels need no interrupt-mask acknowledge. */
static inline void smtc_im_ack_irq(unsigned int irq)
{
}

#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
#include <linux/cpumask.h>

extern void plat_set_irq_affinity(unsigned int irq,
				  const struct cpumask *affinity);
extern void smtc_forward_irq(unsigned int irq);

/*
 * IRQ affinity hook invoked at the beginning of interrupt dispatch
 * if option is enabled.
 *
 * Up through Linux 2.6.22 (at least) cpumask operations are very
 * inefficient on MIPS.  Initial prototypes of SMTC IRQ affinity
 * used a "fast path" per-IRQ-descriptor cache of affinity information
 * to reduce latency.  As there is a project afoot to optimize the
 * cpumask implementations, this version is optimistically assuming
 * that cpumask.h macro overhead is reasonable during interrupt dispatch.
 *
 * If the current CPU is not in the IRQ's affinity mask, the interrupt
 * is forwarded to an eligible CPU and dispatch returns early.
 */
#define IRQ_AFFINITY_HOOK(irq)						\
do {									\
	if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) { \
		smtc_forward_irq(irq);					\
		irq_exit();						\
		return;							\
	}								\
} while (0)

#else /* Not doing SMTC affinity */

#define IRQ_AFFINITY_HOOK(irq) do { } while (0)

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP

/*
 * Clear interrupt mask handling "backstop" if irq_hwmask
 * entry so indicates.  This implies that the ack() or end()
 * functions will take over re-enabling the low-level mask.
 * Otherwise it will be done on return from exception.
 */
#define __DO_IRQ_SMTC_HOOK(irq)						\
do {									\
	IRQ_AFFINITY_HOOK(irq);						\
	if (irq_hwmask[irq] & 0x0000ff00)				\
		write_c0_tccontext(read_c0_tccontext() &		\
				   ~(irq_hwmask[irq] & 0x0000ff00));	\
} while (0)

/* Same backstop handling, but without the affinity check/forwarding. */
#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq)				\
do {									\
	if (irq_hwmask[irq] & 0x0000ff00)				\
		write_c0_tccontext(read_c0_tccontext() &		\
				   ~(irq_hwmask[irq] & 0x0000ff00));	\
} while (0)

#else

/* No IM backstop: only the (possibly empty) affinity hook remains. */
#define __DO_IRQ_SMTC_HOOK(irq)						\
do {									\
	IRQ_AFFINITY_HOOK(irq);						\
} while (0)

#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) do { } while (0)

#endif
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * Ideally there should be a way to get this into kernel/irq/handle.c to
 * avoid the overhead of a call for just a tiny function ...
 */
#define do_IRQ(irq)							\
do {									\
	irq_enter();							\
	__DO_IRQ_SMTC_HOOK(irq);					\
	generic_handle_irq(irq);					\
	irq_exit();							\
} while (0)
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * To avoid inefficient and in some cases pathological re-checking of
 * IRQ affinity, we have this variant that skips the affinity check.
 */
#define do_IRQ_no_affinity(irq)						\
do {									\
	irq_enter();							\
	__NO_AFFINITY_IRQ_SMTC_HOOK(irq);				\
	generic_handle_irq(irq);					\
	irq_exit();							\
} while (0)
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
/* Platform-supplied interrupt controller initialization. */
extern void arch_init_irq(void);
extern void spurious_interrupt(void);

/* IRQ number allocator (see the corresponding .c implementation). */
extern int allocate_irqno(void);
extern void alloc_legacy_irqno(void);
extern void free_irqno(unsigned int irq);

/*
 * Before R2 the timer and performance counter interrupts were both fixed to
 * IE7.  Since R2 their number has to be read from the c0_intctl register.
 */
#define CP0_LEGACY_COMPARE_IRQ 7

extern int cp0_compare_irq;
extern int cp0_perfcount_irq;
# endif /* _ASM_IRQ_H */