/*
 * linux/arch/xtensa/kernel/irq.c
 *
 * Xtensa built-in interrupt controller and some generic functions copied
 * from i386.
 *
 * Copyright (C) 2002 - 2013 Tensilica, Inc.
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 *
 * Chris Zankel <chris@zankel.net>
 * Kevin Chea
 *
 */
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/irqchip.h>
#include <linux/irqchip/xtensa-mx.h>
#include <linux/irqchip/xtensa-pic.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

#include <asm/mxregs.h>
#include <asm/uaccess.h>
#include <asm/platform.h>

DECLARE_PER_CPU(unsigned long, nmi_count);

2012-11-04 00:29:12 +04:00
asmlinkage void do_IRQ ( int hwirq , struct pt_regs * regs )
2005-06-23 22:01:16 -07:00
{
2013-12-01 12:59:49 +04:00
int irq = irq_find_mapping ( NULL , hwirq ) ;
2006-12-10 02:18:47 -08:00
2012-11-04 00:29:12 +04:00
if ( hwirq > = NR_IRQS ) {
2006-12-10 02:18:47 -08:00
printk ( KERN_EMERG " %s: cannot handle IRQ %d \n " ,
2012-11-04 00:29:12 +04:00
__func__ , hwirq ) ;
2006-12-10 02:18:47 -08:00
}
2005-06-23 22:01:16 -07:00
# ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
{
unsigned long sp ;
__asm__ __volatile__ ( " mov %0, a1 \n " : " =a " ( sp ) ) ;
sp & = THREAD_SIZE - 1 ;
if ( unlikely ( sp < ( sizeof ( thread_info ) + 1024 ) ) )
printk ( " Stack overflow in do_IRQ: %ld \n " ,
sp - sizeof ( struct thread_info ) ) ;
}
# endif
2011-02-06 22:10:52 +01:00
generic_handle_irq ( irq ) ;
2005-06-23 22:01:16 -07:00
}
2011-03-24 18:28:40 +01:00
/*
 * Hook for /proc/interrupts: append the xtensa-specific summary rows
 * (IPI counters on SMP, fake-NMI counters when enabled) after the
 * per-IRQ lines.  Always reports success.
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_SMP
	show_ipi_list(p, prec);
#endif
#if XTENSA_FAKE_NMI
	{
		unsigned cpu;

		seq_printf(p, "%*s: ", prec, "NMI");
		for_each_online_cpu(cpu)
			seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
		seq_puts(p, "  Non-maskable interrupts\n");
	}
#endif
	return 0;
}
int xtensa_irq_domain_xlate ( const u32 * intspec , unsigned int intsize ,
unsigned long int_irq , unsigned long ext_irq ,
unsigned long * out_hwirq , unsigned int * out_type )
2005-06-23 22:01:16 -07:00
{
2013-12-01 12:59:49 +04:00
if ( WARN_ON ( intsize < 1 | | intsize > 2 ) )
return - EINVAL ;
if ( intsize = = 2 & & intspec [ 1 ] = = 1 ) {
int_irq = xtensa_map_ext_irq ( ext_irq ) ;
if ( int_irq < XCHAL_NUM_INTERRUPTS )
* out_hwirq = int_irq ;
else
return - EINVAL ;
} else {
* out_hwirq = int_irq ;
}
* out_type = IRQ_TYPE_NONE ;
return 0 ;
2005-06-23 22:01:16 -07:00
}
2013-12-01 12:59:49 +04:00
int xtensa_irq_map ( struct irq_domain * d , unsigned int irq ,
2012-11-04 00:29:12 +04:00
irq_hw_number_t hw )
2005-06-23 22:01:16 -07:00
{
2013-12-01 12:59:49 +04:00
struct irq_chip * irq_chip = d - > host_data ;
2012-11-04 00:29:12 +04:00
u32 mask = 1 < < hw ;
if ( mask & XCHAL_INTTYPE_MASK_SOFTWARE ) {
2013-12-01 12:59:49 +04:00
irq_set_chip_and_handler_name ( irq , irq_chip ,
2012-11-04 00:29:12 +04:00
handle_simple_irq , " level " ) ;
irq_set_status_flags ( irq , IRQ_LEVEL ) ;
} else if ( mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE ) {
2013-12-01 12:59:49 +04:00
irq_set_chip_and_handler_name ( irq , irq_chip ,
2012-11-04 00:29:12 +04:00
handle_edge_irq , " edge " ) ;
irq_clear_status_flags ( irq , IRQ_LEVEL ) ;
} else if ( mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL ) {
2013-12-01 12:59:49 +04:00
irq_set_chip_and_handler_name ( irq , irq_chip ,
2012-11-04 00:29:12 +04:00
handle_level_irq , " level " ) ;
irq_set_status_flags ( irq , IRQ_LEVEL ) ;
} else if ( mask & XCHAL_INTTYPE_MASK_TIMER ) {
2013-12-01 12:59:49 +04:00
irq_set_chip_and_handler_name ( irq , irq_chip ,
handle_percpu_irq , " timer " ) ;
2012-11-04 00:29:12 +04:00
irq_clear_status_flags ( irq , IRQ_LEVEL ) ;
2015-06-23 01:53:05 +03:00
# ifdef XCHAL_INTTYPE_MASK_PROFILING
} else if ( mask & XCHAL_INTTYPE_MASK_PROFILING ) {
irq_set_chip_and_handler_name ( irq , irq_chip ,
handle_percpu_irq , " profiling " ) ;
irq_set_status_flags ( irq , IRQ_LEVEL ) ;
# endif
2012-11-04 00:29:12 +04:00
} else { /* XCHAL_INTTYPE_MASK_WRITE_ERROR */
/* XCHAL_INTTYPE_MASK_NMI */
2013-12-01 12:59:49 +04:00
irq_set_chip_and_handler_name ( irq , irq_chip ,
2012-11-04 00:29:12 +04:00
handle_level_irq , " level " ) ;
irq_set_status_flags ( irq , IRQ_LEVEL ) ;
}
return 0 ;
}
2005-06-23 22:01:16 -07:00
2013-12-01 12:59:49 +04:00
unsigned xtensa_map_ext_irq ( unsigned ext_irq )
2012-11-04 00:29:12 +04:00
{
unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE |
XCHAL_INTTYPE_MASK_EXTERN_LEVEL ;
unsigned i ;
2006-12-10 02:18:47 -08:00
2012-11-04 00:29:12 +04:00
for ( i = 0 ; mask ; + + i , mask > > = 1 ) {
if ( ( mask & 1 ) & & ext_irq - - = = 0 )
return i ;
}
return XCHAL_NUM_INTERRUPTS ;
}
2006-12-10 02:18:47 -08:00
2013-12-01 12:04:57 +04:00
/*
 * Inverse of xtensa_map_ext_irq(): count how many external interrupts
 * sit below core interrupt 'irq', i.e. the external interrupt index of
 * that core interrupt.
 */
unsigned xtensa_get_ext_irq_no(unsigned irq)
{
	unsigned ext_bits = XCHAL_INTTYPE_MASK_EXTERN_EDGE |
			    XCHAL_INTTYPE_MASK_EXTERN_LEVEL;
	unsigned below = (1u << irq) - 1;

	return hweight32(ext_bits & below);
}
/*
 * Boot-time interrupt setup.  Initializes the interrupt controller —
 * from the device tree when CONFIG_OF is enabled, otherwise the legacy
 * MX (SMP-capable) or plain PIC controller — then brings up IPIs on
 * SMP and finally lets the board variant register its own interrupts.
 */
void __init init_IRQ(void)
{
#ifdef CONFIG_OF
	irqchip_init();
#else
#ifdef CONFIG_HAVE_SMP
	xtensa_mx_init_legacy(NULL);
#else
	xtensa_pic_init_legacy(NULL);
#endif
#endif
#ifdef CONFIG_SMP
	ipi_init();
#endif
	/* Board/variant-specific interrupt setup hook. */
	variant_init_irq();
}
# ifdef CONFIG_HOTPLUG_CPU
/*
* The CPU has been marked offline . Migrate IRQs off this CPU . If
* the affinity settings do not allow other CPUs , force them onto any
* available CPU .
*/
void migrate_irqs ( void )
{
unsigned int i , cpu = smp_processor_id ( ) ;
2014-02-23 21:40:10 +00:00
for_each_active_irq ( i ) {
struct irq_data * data = irq_get_irq_data ( i ) ;
2015-07-13 20:53:10 +00:00
struct cpumask * mask ;
2013-10-17 02:42:28 +04:00
unsigned int newcpu ;
if ( irqd_is_per_cpu ( data ) )
continue ;
2015-07-13 20:53:10 +00:00
mask = irq_data_get_affinity_mask ( data ) ;
if ( ! cpumask_test_cpu ( cpu , mask ) )
2013-10-17 02:42:28 +04:00
continue ;
2015-07-13 20:53:10 +00:00
newcpu = cpumask_any_and ( mask , cpu_online_mask ) ;
2013-10-17 02:42:28 +04:00
if ( newcpu > = nr_cpu_ids ) {
pr_info_ratelimited ( " IRQ%u no longer affine to CPU%u \n " ,
i , cpu ) ;
2015-07-13 20:53:10 +00:00
cpumask_setall ( mask ) ;
2013-10-17 02:42:28 +04:00
}
2015-07-13 20:53:10 +00:00
irq_set_affinity ( i , mask ) ;
2013-10-17 02:42:28 +04:00
}
}
#endif /* CONFIG_HOTPLUG_CPU */