/*
 * linux/arch/xtensa/kernel/irq.c
 *
 * Xtensa built-in interrupt controller and some generic functions copied
 * from i386.
 *
 * Copyright (C) 2002 - 2006 Tensilica, Inc.
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 *
 * Chris Zankel <chris@zankel.net>
 * Kevin Chea
 *
 */
# include <linux/module.h>
# include <linux/seq_file.h>
# include <linux/interrupt.h>
# include <linux/irq.h>
# include <linux/kernel_stat.h>
2012-11-04 00:29:12 +04:00
# include <linux/irqdomain.h>
2005-06-23 22:01:16 -07:00
# include <asm/uaccess.h>
# include <asm/platform.h>
/* Software shadow of the INTENABLE special register (written via set_sr). */
static unsigned int cached_irq_mask;

/* Error-interrupt counter; reported on the "ERR" line by arch_show_interrupts(). */
atomic_t irq_err_count;
2012-11-04 00:29:12 +04:00
static struct irq_domain * root_domain ;
2005-06-23 22:01:16 -07:00
/*
* do_IRQ handles all normal device IRQ ' s ( the special
* SMP cross - CPU interrupts have their own specific
* handlers ) .
*/
2012-11-04 00:29:12 +04:00
asmlinkage void do_IRQ ( int hwirq , struct pt_regs * regs )
2005-06-23 22:01:16 -07:00
{
2006-12-10 02:18:47 -08:00
struct pt_regs * old_regs = set_irq_regs ( regs ) ;
2012-11-04 00:29:12 +04:00
int irq = irq_find_mapping ( root_domain , hwirq ) ;
2006-12-10 02:18:47 -08:00
2012-11-04 00:29:12 +04:00
if ( hwirq > = NR_IRQS ) {
2006-12-10 02:18:47 -08:00
printk ( KERN_EMERG " %s: cannot handle IRQ %d \n " ,
2012-11-04 00:29:12 +04:00
__func__ , hwirq ) ;
2006-12-10 02:18:47 -08:00
}
2005-06-23 22:01:16 -07:00
irq_enter ( ) ;
# ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
{
unsigned long sp ;
__asm__ __volatile__ ( " mov %0, a1 \n " : " =a " ( sp ) ) ;
sp & = THREAD_SIZE - 1 ;
if ( unlikely ( sp < ( sizeof ( thread_info ) + 1024 ) ) )
printk ( " Stack overflow in do_IRQ: %ld \n " ,
sp - sizeof ( struct thread_info ) ) ;
}
# endif
2011-02-06 22:10:52 +01:00
generic_handle_irq ( irq ) ;
2005-06-23 22:01:16 -07:00
irq_exit ( ) ;
2006-12-10 02:18:47 -08:00
set_irq_regs ( old_regs ) ;
2005-06-23 22:01:16 -07:00
}
2011-03-24 18:28:40 +01:00
int arch_show_interrupts ( struct seq_file * p , int prec )
2005-06-23 22:01:16 -07:00
{
2011-03-24 18:28:40 +01:00
seq_printf ( p , " %*s: " , prec , " ERR " ) ;
seq_printf ( p , " %10u \n " , atomic_read ( & irq_err_count ) ) ;
2005-06-23 22:01:16 -07:00
return 0 ;
}
2011-04-19 22:52:58 +02:00
static void xtensa_irq_mask ( struct irq_data * d )
2005-06-23 22:01:16 -07:00
{
2012-11-04 00:29:12 +04:00
cached_irq_mask & = ~ ( 1 < < d - > hwirq ) ;
2012-10-15 03:55:38 +04:00
set_sr ( cached_irq_mask , intenable ) ;
2005-06-23 22:01:16 -07:00
}
2011-04-19 22:52:58 +02:00
static void xtensa_irq_unmask ( struct irq_data * d )
2005-06-23 22:01:16 -07:00
{
2012-11-04 00:29:12 +04:00
cached_irq_mask | = 1 < < d - > hwirq ;
2012-10-15 03:55:38 +04:00
set_sr ( cached_irq_mask , intenable ) ;
2005-06-23 22:01:16 -07:00
}
2011-04-19 22:52:58 +02:00
static void xtensa_irq_enable ( struct irq_data * d )
2009-03-04 16:21:31 +01:00
{
2012-11-04 00:29:12 +04:00
variant_irq_enable ( d - > hwirq ) ;
2012-09-17 05:44:34 +04:00
xtensa_irq_unmask ( d ) ;
2009-03-04 16:21:31 +01:00
}
2011-04-19 22:52:58 +02:00
static void xtensa_irq_disable ( struct irq_data * d )
2009-03-04 16:21:31 +01:00
{
2012-09-17 05:44:34 +04:00
xtensa_irq_mask ( d ) ;
2012-11-04 00:29:12 +04:00
variant_irq_disable ( d - > hwirq ) ;
2009-03-04 16:21:31 +01:00
}
2011-04-19 22:52:58 +02:00
static void xtensa_irq_ack ( struct irq_data * d )
2005-06-23 22:01:16 -07:00
{
2012-11-04 00:29:12 +04:00
set_sr ( 1 < < d - > hwirq , intclear ) ;
2005-06-23 22:01:16 -07:00
}
2011-04-19 22:52:58 +02:00
static int xtensa_irq_retrigger ( struct irq_data * d )
2005-06-23 22:01:16 -07:00
{
2012-11-04 00:29:12 +04:00
set_sr ( 1 < < d - > hwirq , intset ) ;
2006-12-10 02:18:47 -08:00
return 1 ;
2005-06-23 22:01:16 -07:00
}
2006-12-10 02:18:47 -08:00
static struct irq_chip xtensa_irq_chip = {
. name = " xtensa " ,
2011-02-06 22:10:52 +01:00
. irq_enable = xtensa_irq_enable ,
. irq_disable = xtensa_irq_disable ,
. irq_mask = xtensa_irq_mask ,
. irq_unmask = xtensa_irq_unmask ,
. irq_ack = xtensa_irq_ack ,
. irq_retrigger = xtensa_irq_retrigger ,
2006-12-10 02:18:47 -08:00
} ;
2005-06-23 22:01:16 -07:00
2012-11-04 00:29:12 +04:00
/*
 * Map a linux irq onto hardware line @hw: choose flow handler and
 * level/edge status from the compile-time XCHAL interrupt type masks.
 * Always returns 0.
 */
static int xtensa_irq_map(struct irq_domain *d, unsigned int irq,
		irq_hw_number_t hw)
{
	/* Unsigned shift: hw may be bit 31; 1 << 31 on int is UB. */
	u32 mask = 1u << hw;

	if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) {
		irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
				handle_simple_irq, "level");
		irq_set_status_flags(irq, IRQ_LEVEL);
	} else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) {
		irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
				handle_edge_irq, "edge");
		irq_clear_status_flags(irq, IRQ_LEVEL);
	} else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) {
		irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
				handle_level_irq, "level");
		irq_set_status_flags(irq, IRQ_LEVEL);
	} else if (mask & XCHAL_INTTYPE_MASK_TIMER) {
		irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
				handle_edge_irq, "edge");
		irq_clear_status_flags(irq, IRQ_LEVEL);
	} else {
		/* XCHAL_INTTYPE_MASK_WRITE_ERROR or XCHAL_INTTYPE_MASK_NMI */
		irq_set_chip_and_handler_name(irq, &xtensa_irq_chip,
				handle_level_irq, "level");
		irq_set_status_flags(irq, IRQ_LEVEL);
	}
	return 0;
}
2005-06-23 22:01:16 -07:00
2012-11-04 00:29:12 +04:00
static unsigned map_ext_irq ( unsigned ext_irq )
{
unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE |
XCHAL_INTTYPE_MASK_EXTERN_LEVEL ;
unsigned i ;
2006-12-10 02:18:47 -08:00
2012-11-04 00:29:12 +04:00
for ( i = 0 ; mask ; + + i , mask > > = 1 ) {
if ( ( mask & 1 ) & & ext_irq - - = = 0 )
return i ;
}
return XCHAL_NUM_INTERRUPTS ;
}
2006-12-10 02:18:47 -08:00
2012-11-04 00:29:12 +04:00
/*
 * Device Tree IRQ specifier translation function which works with one or
 * two cell bindings.  First cell value maps directly to the hwirq number.
 * Second cell if present specifies whether hwirq number is external (1) or
 * internal (0).
 */
int xtensa_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
		const u32 *intspec, unsigned int intsize,
		unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1 || intsize > 2))
		return -EINVAL;

	/* One-cell form, or second cell says "internal": use cell 0 as-is. */
	if (intsize != 2 || intspec[1] != 1) {
		*out_hwirq = intspec[0];
		*out_type = IRQ_TYPE_NONE;
		return 0;
	}

	/* External interrupt: translate the index to a hardware line. */
	{
		unsigned int_irq = map_ext_irq(intspec[0]);

		if (int_irq >= XCHAL_NUM_INTERRUPTS)
			return -EINVAL;
		*out_hwirq = int_irq;
	}
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
2006-12-10 02:18:47 -08:00
2012-11-04 00:29:12 +04:00
/* irq_domain callbacks: DT specifier translation and hwirq mapping. */
static const struct irq_domain_ops xtensa_irq_domain_ops = {
	.map	= xtensa_irq_map,
	.xlate	= xtensa_irq_domain_xlate,
};
2006-12-10 02:18:47 -08:00
2012-11-04 00:29:12 +04:00
/*
 * Boot-time interrupt setup: reset the software mask, clear all pending
 * interrupts, register the legacy irq domain as the default host, and
 * give the platform variant a chance to set up its own interrupts.
 */
void __init init_IRQ(void)
{
	struct device_node *intc = NULL;

	cached_irq_mask = 0;
	set_sr(~0, intclear);

	root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0,
			&xtensa_irq_domain_ops, NULL);
	irq_set_default_host(root_domain);

	variant_init_irq();
}