/*
 * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
 *
 * This file define the irq handler for MSP CIC subsystem interrupts.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irq.h>

#include <asm/mipsregs.h>

#include <msp_cic_int.h>
#include <msp_regs.h>
/*
 * External API
 */
extern void msp_per_irq_init(void);
extern void msp_per_irq_dispatch(void);
/*
* Convenience Macro . Should be somewhere generic .
*/
# define get_current_vpe() \
( ( read_c0_tcbind ( ) > > TCBIND_CURVPE_SHIFT ) & TCBIND_CURVPE )
# ifdef CONFIG_SMP
# define LOCK_VPE(flags, mtflags) \
do { \
local_irq_save ( flags ) ; \
mtflags = dmt ( ) ; \
} while ( 0 )
# define UNLOCK_VPE(flags, mtflags) \
do { \
emt ( mtflags ) ; \
local_irq_restore ( flags ) ; \
} while ( 0 )
# define LOCK_CORE(flags, mtflags) \
do { \
local_irq_save ( flags ) ; \
mtflags = dvpe ( ) ; \
} while ( 0 )
# define UNLOCK_CORE(flags, mtflags) \
do { \
evpe ( mtflags ) ; \
local_irq_restore ( flags ) ; \
} while ( 0 )
# else
# define LOCK_VPE(flags, mtflags)
# define UNLOCK_VPE(flags, mtflags)
# endif
/*
 * Ensure writes to the CIC are completed.
 *
 * wmb() orders the preceding mask-register stores; the read-back of a
 * CIC register then flushes any posted write out to the device before
 * we return.  dummy_read++ only keeps the compiler from flagging the
 * read-back value as set-but-unused.
 */
static inline void cic_wmb(void)
{
	const volatile void __iomem *cic_mem = CIC_VPE0_MSK_REG;
	volatile u32 dummy_read;

	wmb();
	dummy_read = __raw_readl(cic_mem);
	dummy_read++;
}
/*
 * irq_chip .irq_unmask handler: set the enable bit for d->irq in the
 * CIC mask register of the VPE we are currently executing on.
 * cic_wmb() afterwards guarantees the device sees the new mask.
 */
static void unmask_cic_irq(struct irq_data *d)
{
	volatile u32	*cic_msk_reg = CIC_VPE0_MSK_REG;
	int vpe;
#ifdef CONFIG_SMP
	unsigned int mtflags;
	unsigned long flags;

	/*
	 * Make sure we have IRQ affinity.  It may have changed while
	 * we were processing the IRQ.
	 */
	if (!cpumask_test_cpu(smp_processor_id(),
			      irq_data_get_affinity_mask(d)))
		return;
#endif
	vpe = get_current_vpe();
	LOCK_VPE(flags, mtflags);
	/* RMW of the per-VPE mask word; protected by LOCK_VPE above. */
	cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE));
	UNLOCK_VPE(flags, mtflags);
	cic_wmb();
}
2011-03-24 00:09:06 +03:00
static void mask_cic_irq ( struct irq_data * d )
2007-06-15 01:54:47 +04:00
{
2011-01-25 11:20:10 +03:00
volatile u32 * cic_msk_reg = CIC_VPE0_MSK_REG ;
int vpe = get_current_vpe ( ) ;
# ifdef CONFIG_SMP
unsigned long flags , mtflags ;
# endif
LOCK_VPE ( flags , mtflags ) ;
2011-03-24 00:09:06 +03:00
cic_msk_reg [ vpe ] & = ~ ( 1 < < ( d - > irq - MSP_CIC_INTBASE ) ) ;
2011-01-25 11:20:10 +03:00
UNLOCK_VPE ( flags , mtflags ) ;
cic_wmb ( ) ;
}
/*
 * irq_chip .irq_ack / .irq_mask_ack handler: mask the source first,
 * then clear its latched bit in the CIC status register (write-1-to-
 * clear, as msp_cic_irq_init() clears all bits the same way).
 */
static void msp_cic_irq_ack(struct irq_data *d)
{
	mask_cic_irq(d);
	/*
	 * Only really necessary for 18, 16-14 and sometimes 3:0
	 * (since these can be edge sensitive) but it doesn't
	 * hurt for the others
	 */
	*CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE));
}
/* Note: Limiting to VSMP. */
#ifdef CONFIG_MIPS_MT_SMP
/*
 * irq_chip .irq_set_affinity handler: enable d->irq in the CIC mask
 * register of every online CPU in 'cpumask' and disable it everywhere
 * else.  Always returns 0.
 *
 * NOTE(review): indexes cic_mask[cpu] from CIC_VPE0_MSK_REG, i.e. it
 * assumes the per-VPE mask registers form a contiguous u32 array and
 * that the Linux CPU number equals the VPE number — confirm against
 * msp_regs.h and the platform's CPU numbering.
 */
static int msp_cic_irq_set_affinity(struct irq_data *d,
				    const struct cpumask *cpumask, bool force)
{
	int cpu;
	unsigned long flags;
	unsigned int mtflags;
	unsigned long imask = (1 << (d->irq - MSP_CIC_INTBASE));
	volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG;

	/* timer balancing should be disabled in kernel code */
	BUG_ON(d->irq == MSP_INT_VPE0_TIMER || d->irq == MSP_INT_VPE1_TIMER);

	/* Halt every VPE on the core while all mask registers change. */
	LOCK_CORE(flags, mtflags);
	/* enable if any of each VPE's TCs require this IRQ */
	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, cpumask))
			cic_mask[cpu] |= imask;
		else
			cic_mask[cpu] &= ~imask;
	}
	UNLOCK_CORE(flags, mtflags);
	return 0;
}
#endif
/*
 * irq_chip for the CIC interrupt lines (MSP_CIC_INTBASE .. +31).
 * Registered with handle_level_irq by msp_cic_irq_init(); ack also
 * clears the latched status bit (see msp_cic_irq_ack()).
 */
static struct irq_chip msp_cic_irq_controller = {
	.name = "MSP_CIC",
	.irq_mask = mask_cic_irq,
	.irq_mask_ack = msp_cic_irq_ack,
	.irq_unmask = unmask_cic_irq,
	.irq_ack = msp_cic_irq_ack,
#ifdef CONFIG_MIPS_MT_SMP
	.irq_set_affinity = msp_cic_irq_set_affinity,
#endif
};
/*
 * One-time CIC setup: mask all interrupts on both VPEs, clear any
 * latched status, configure the PCI external-interrupt inputs,
 * register the irq_chip for all 32 CIC lines, then bring up the PER
 * interrupt sub-controller behind the CIC.
 */
void __init msp_cic_irq_init(void)
{
	int i;

	/* Mask/clear interrupts. */
	*CIC_VPE0_MSK_REG = 0x00000000;
	*CIC_VPE1_MSK_REG = 0x00000000;
	*CIC_STS_REG = 0xFFFFFFFF;	/* write-1-to-clear all status bits */
	/*
	 * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI.
	 * These inputs map to EXT_INT_POL[6:4] inside the CIC.
	 * They are to be active low, level sensitive.
	 */
	*CIC_EXT_CFG_REG &= 0xFFFF8F8F;

	/* initialize all the IRQ descriptors */
	for (i = MSP_CIC_INTBASE; i < MSP_CIC_INTBASE + 32; i++) {
		irq_set_chip_and_handler(i, &msp_cic_irq_controller,
					 handle_level_irq);
	}

	/* Initialize the PER interrupt sub-system */
	msp_per_irq_init();
}
2011-01-25 11:20:10 +03:00
/* CIC masked by CIC vector processing before dispatch called */
2007-06-15 01:54:47 +04:00
void msp_cic_irq_dispatch ( void )
{
2011-01-25 11:20:10 +03:00
volatile u32 * cic_msk_reg = ( volatile u32 * ) CIC_VPE0_MSK_REG ;
u32 cic_mask ;
u32 pending ;
int cic_status = * CIC_STS_REG ;
cic_mask = cic_msk_reg [ get_current_vpe ( ) ] ;
pending = cic_status & cic_mask ;
if ( pending & ( 1 < < ( MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE ) ) ) {
2007-06-15 01:54:47 +04:00
do_IRQ ( MSP_INT_VPE0_TIMER ) ;
2011-01-25 11:20:10 +03:00
} else if ( pending & ( 1 < < ( MSP_INT_VPE1_TIMER - MSP_CIC_INTBASE ) ) ) {
do_IRQ ( MSP_INT_VPE1_TIMER ) ;
} else if ( pending & ( 1 < < ( MSP_INT_PER - MSP_CIC_INTBASE ) ) ) {
msp_per_irq_dispatch ( ) ;
} else if ( pending ) {
do_IRQ ( ffs ( pending ) + MSP_CIC_INTBASE - 1 ) ;
} else {
spurious_interrupt ( ) ;
}
2007-06-15 01:54:47 +04:00
}