/* MN10300 Arch-specific interrupt handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/setup.h>
#include <asm/serial-regs.h>
/* Per-CPU cached EPSW interrupt state consulted by local_irq_enable() and
 * friends (see do_IRQ() below, which saves/restores it around handler
 * dispatch); boot default is interrupts enabled at priority mask level 7 */
unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
	[0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
};
EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
#ifdef CONFIG_SMP
/* CPU currently designated to service each IRQ (updated when a pending
 * affinity request is applied in mask_ack/unmask_clear) */
static char irq_affinity_online[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = 0
};

#define NR_IRQ_WORDS	((NR_IRQS + 31) / 32)
/* bitmap of IRQs with an affinity change requested but not yet applied;
 * set by mn10300_cpupic_setaffinity(), consumed with test_and_clear_bit() */
static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
	[0 ... NR_IRQ_WORDS - 1] = 0
};
#endif  /* CONFIG_SMP */
/* count of spurious/erroneous interrupts, shown by arch_show_interrupts() */
atomic_t irq_err_count;

/*
 * MN10300 interrupt controller operations
 */
/*
 * ACK an IRQ: write GxICR_DETECT alone through the channel's GxICR_u8 view
 * to clear the latched request bit, with interrupts disabled.  The read-back
 * of GxICR afterwards flushes the write to the register.
 */
static void mn10300_cpupic_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	GxICR_u8(irq) = GxICR_DETECT;
	tmp = GxICR(irq);	/* flush the write */
	arch_local_irq_restore(flags);
}
/*
 * Atomically rewrite an interrupt control register: keep only the bits in
 * @mask, OR in @set, all with interrupts disabled.  The final read-back
 * flushes the write to the ICR before interrupts are restored.
 */
static void __mask_and_set_icr(unsigned int irq,
			       unsigned int mask, unsigned int set)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	tmp = GxICR(irq);
	GxICR(irq) = (tmp & mask) | set;
	tmp = GxICR(irq);	/* flush the write */
	arch_local_irq_restore(flags);
}
2011-03-18 16:52:51 +00:00
static void mn10300_cpupic_mask ( struct irq_data * d )
2008-02-08 04:19:31 -08:00
{
2011-03-18 16:52:51 +00:00
__mask_and_set_icr ( d - > irq , GxICR_LEVEL , 0 ) ;
2008-02-08 04:19:31 -08:00
}
/*
 * Mask an IRQ channel and clear its latched detect bit.
 *
 * On SMP this is also where a pending affinity change (flagged by
 * mn10300_cpupic_setaffinity()) is applied: the local ICR is masked, then
 * the saved level/enable state plus GxICR_DETECT is replayed into the
 * chosen online CPU's copy of the register via CROSS_GxICR().
 */
static void mn10300_cpupic_mask_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		/* no affinity change pending: mask and clear the latch here */
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
		tmp = GxICR(irq);	/* flush the write */
	} else {
		u16 tmp2;

		/* mask the channel on this CPU first... */
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL);
		tmp2 = GxICR(irq);	/* flush the write */

		/* ...then re-arm it on a CPU from the requested mask */
		irq_affinity_online[irq] =
			cpumask_any_and(d->affinity, cpu_online_mask);
		CROSS_GxICR(irq, irq_affinity_online[irq]) =
			(tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
	}

	arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
#endif /* CONFIG_SMP */
}
2011-03-18 16:52:51 +00:00
static void mn10300_cpupic_unmask ( struct irq_data * d )
2008-02-08 04:19:31 -08:00
{
2011-03-18 16:52:51 +00:00
__mask_and_set_icr ( d - > irq , GxICR_LEVEL , GxICR_ENABLE ) ;
2008-02-08 04:19:31 -08:00
}
/*
 * Unmask an IRQ channel and simultaneously clear any latched request
 * (enable + detect).  Used as both enable and unmask op for the
 * level-triggered PIC chip.  On SMP, a pending affinity request is applied
 * here by re-enabling the channel on the newly chosen CPU instead.
 */
static void mn10300_cpupic_unmask_clear(struct irq_data *d)
{
	unsigned int irq = d->irq;

	/* the MN10300 PIC latches its interrupt request bit, even after the
	 * device has ceased to assert its interrupt line and the interrupt
	 * channel has been disabled in the PIC, so for level-triggered
	 * interrupts we need to clear the request bit when we re-enable */
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		/* no affinity change pending: enable and clear latch here */
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = GxICR(irq);	/* flush the write */
	} else {
		tmp = GxICR(irq);

		/* apply the requested affinity: enable and clear the latch
		 * on the chosen online CPU's copy of the ICR */
		irq_affinity_online[irq] = cpumask_any_and(d->affinity,
							   cpu_online_mask);
		CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
	}

	arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_SMP
/*
 * Request an affinity change for an IRQ.  The change is not applied here:
 * we only flag the IRQ in irq_affinity_request; the actual re-routing is
 * deferred to the next mask_ack/unmask_clear of the channel, which reads
 * d->affinity at that point.  Always reports success.
 */
static int
mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
			   bool force)
{
	unsigned long flags;

	flags = arch_local_cli_save();
	set_bit(d->irq, irq_affinity_request);
	arch_local_irq_restore(flags);
	return 0;
}
#endif /* CONFIG_SMP */
/*
 * MN10300 PIC level-triggered IRQ handling.
 *
 * The PIC has no 'ACK' function per se.  It is possible to clear individual
 * channel latches, but each latch relatches whether or not the channel is
 * masked, so we need to clear the latch when we unmask the channel.
 *
 * Also for this reason, we don't supply an ack() op (it's unused anyway if
 * mask_ack() is provided), and mask_ack() just masks.
 */
static struct irq_chip mn10300_cpu_pic_level = {
	.name			= "cpu_l",
	.irq_disable		= mn10300_cpupic_mask,
	.irq_enable		= mn10300_cpupic_unmask_clear,
	.irq_ack		= NULL,
	.irq_mask		= mn10300_cpupic_mask,
	.irq_mask_ack		= mn10300_cpupic_mask,
	.irq_unmask		= mn10300_cpupic_unmask_clear,
#ifdef CONFIG_SMP
	.irq_set_affinity	= mn10300_cpupic_setaffinity,
#endif
};
/*
 * MN10300 PIC edge-triggered IRQ handling.
 *
 * We use the latch clearing function of the PIC as the 'ACK' function.
 */
static struct irq_chip mn10300_cpu_pic_edge = {
	.name			= "cpu_e",
	.irq_disable		= mn10300_cpupic_mask,
	.irq_enable		= mn10300_cpupic_unmask,
	.irq_ack		= mn10300_cpupic_ack,
	.irq_mask		= mn10300_cpupic_mask,
	.irq_mask_ack		= mn10300_cpupic_mask_ack,
	.irq_unmask		= mn10300_cpupic_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity	= mn10300_cpupic_setaffinity,
#endif
};
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(int irq)
{
	/* just log it; note irq_err_count is NOT incremented here */
	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
}
/*
 * change the level at which an IRQ executes
 * - must not be called whilst interrupts are being processed!
 */
void set_intr_level(int irq, u16 level)
{
	BUG_ON(in_interrupt());

	/* keep only GxICR_ENABLE, replacing the level bits with @level */
	__mask_and_set_icr(irq, GxICR_ENABLE, level);
}
/*
 * mark an interrupt to be ACK'd after interrupt handlers have been run rather
 * than before
 * - switches the IRQ onto the level-triggered chip, whose unmask op clears
 *   the latch (see mn10300_cpu_pic_level above)
 */
void mn10300_set_lateack_irq_type(int irq)
{
	irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level,
				 handle_level_irq);
}
/*
 * initialise the interrupt system
 * - every IRQ not yet claimed by a chip defaults to the edge-triggered
 *   CPU PIC chip; unit_init_IRQ() then applies board-specific setup
 */
void __init init_IRQ(void)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++)
		if (irq_get_chip(irq) == &no_irq_chip)
			/* due to the PIC latching interrupt requests, even
			 * when the IRQ is disabled, IRQ_PENDING is superfluous
			 * and we can use handle_level_irq() for edge-triggered
			 * interrupts */
			irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge,
						 handle_level_irq);

	unit_init_IRQ();
}
/*
 * handle normal device IRQs
 * - loops reading the interrupt acceptance group register (IAGR) and
 *   dispatching each pending IRQ until none remain; the result from IAGR
 *   depends on the current EPSW.IM priority mask
 */
asmlinkage void do_IRQ(void)
{
	unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
	unsigned int cpu_id = smp_processor_id();
	int irq;

	/* sanity-check remaining stack headroom before taking the IRQ */
	sp = current_stack_pointer();
	BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);

	/* make sure local_irq_enable() doesn't muck up the interrupt priority
	 * setting in EPSW */
	old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
	local_save_flags(epsw);
	__mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
	irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;

#ifdef CONFIG_MN10300_WD_TIMER
	__IRQ_STAT(cpu_id, __irq_count)++;
#endif

	irq_enter();

	for (;;) {
		/* ask the interrupt controller for the next IRQ to process
		 * - the result we get depends on EPSW.IM
		 */
		irq = IAGR & IAGR_GN;
		if (!irq)
			break;

		/* drop to handler priority whilst running the handler;
		 * IAGR yields the group number, hence the >> 2 */
		local_irq_restore(irq_disabled_epsw);

		generic_handle_irq(irq >> 2);

		/* restore IRQ controls for IAGR access */
		local_irq_restore(epsw);
	}

	__mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;

	irq_exit();
}
/*
 * Display interrupt management information through /proc/interrupts
 * - appends the arch-specific NMI (watchdog builds only) and ERR rows;
 *   @prec is the label field width supplied by the generic code
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_MN10300_WD_TIMER
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "%10u ", nmi_count(j));
	seq_putc(p, '\n');
#endif

	seq_printf(p, "%*s: ", prec, "ERR");
	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Re-route away from the current (presumably dying) CPU every IRQ whose
 * delivery currently targets it, transplanting each channel's ICR state
 * to a surviving online CPU and re-asserting any request still latched.
 */
void migrate_irqs(void)
{
	int irq;
	unsigned int self, new;
	unsigned long flags;

	self = smp_processor_id();
	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (irqd_is_per_cpu(data))
			continue;

		/* NOTE(review): this test mixes &data->affinity with
		 * irq_affinity[irq] — confirm irq_affinity is the intended
		 * table here rather than data->affinity */
		if (cpumask_test_cpu(self, &data->affinity) &&
		    !cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) {
			int cpu_id;
			/* affinity mask has no online CPU: fall back to the
			 * first online CPU */
			cpu_id = cpumask_first(cpu_online_mask);
			cpumask_set_cpu(cpu_id, &data->affinity);
		}
		/* We need to operate irq_affinity_online atomically. */
		arch_local_cli_save(flags);
		if (irq_affinity_online[irq] == self) {
			u16 x, tmp;

			/* mask the channel on this CPU */
			x = GxICR(irq);
			GxICR(irq) = x & GxICR_LEVEL;
			tmp = GxICR(irq);	/* flush the write */

			new = cpumask_any_and(&data->affinity,
					      cpu_online_mask);
			irq_affinity_online[irq] = new;

			/* clear the latch on the new CPU... */
			CROSS_GxICR(irq, new) =
				(x & GxICR_LEVEL) | GxICR_DETECT;
			tmp = CROSS_GxICR(irq, new);

			/* ...then replay the old level/enable state there,
			 * re-asserting any request still pending locally */
			x &= GxICR_LEVEL | GxICR_ENABLE;
			if (GxICR(irq) & GxICR_REQUEST)
				x |= GxICR_REQUEST | GxICR_DETECT;
			CROSS_GxICR(irq, new) = x;
			tmp = CROSS_GxICR(irq, new);
		}
		arch_local_irq_restore(flags);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */