/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/spinlock.h>

#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>

#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_irq.h>
2014-07-12 14:49:38 +04:00
2014-07-12 14:49:39 +04:00
/* serializes accesses to the internal PERF irq mask/status registers */
static DEFINE_SPINLOCK(ipic_lock);
/* serializes accesses to the external irq configuration registers */
static DEFINE_SPINLOCK(epic_lock);

/* per-cpu PERF irq status/mask register addresses (index is cpu id) */
static u32 irq_stat_addr[2];
static u32 irq_mask_addr[2];

/* width-specific handlers, selected at init time (32- or 64-bit variant) */
static void (*dispatch_internal)(int cpu);
/* non-zero when external irqs are routed through the internal controller */
static int is_ext_irq_cascaded;
static unsigned int ext_irq_count;
/* internal irq range occupied by cascaded external irqs */
static unsigned int ext_irq_start, ext_irq_end;
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
static void (*internal_irq_mask)(struct irq_data *d);
static void (*internal_irq_unmask)(struct irq_data *d,
				   const struct cpumask *m);
2011-11-04 22:09:31 +04:00
2011-11-04 22:09:34 +04:00
/*
 * Return the PERF external irq configuration register covering the given
 * external irq: irqs 0-3 live in the first register, 4+ in the second.
 */
static inline u32 get_ext_irq_perf_reg(int irq)
{
	if (irq < 4)
		return ext_irq_cfg_reg1;
	return ext_irq_cfg_reg2;
}
2011-11-04 22:09:31 +04:00
static inline void handle_internal ( int intbit )
{
2011-11-04 22:09:32 +04:00
if ( is_ext_irq_cascaded & &
intbit > = ext_irq_start & & intbit < = ext_irq_end )
do_IRQ ( intbit - ext_irq_start + IRQ_EXTERNAL_BASE ) ;
else
do_IRQ ( intbit + IRQ_INTERNAL_BASE ) ;
2011-11-04 22:09:31 +04:00
}
2014-07-12 14:49:42 +04:00
/*
 * Decide whether the given irq should be unmasked on this cpu: the cpu
 * must be online and, on SMP, must be part of the requested affinity mask
 * (explicit mask m if given, otherwise the irq's current affinity when one
 * was set).
 */
static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
				     const struct cpumask *m)
{
	bool enable = cpu_online(cpu);

#ifdef CONFIG_SMP
	if (m)
		enable &= cpumask_test_cpu(cpu, m);
	else if (irqd_affinity_was_set(d))
		enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
#endif
	return enable;
}
2009-08-18 16:23:37 +04:00
/*
 * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
 * prioritize any interrupt relatively to another. the static counter
 * will resume the loop where it ended the last time we left this
 * function.
 */
2014-07-12 14:49:35 +04:00
# define BUILD_IPIC_INTERNAL(width) \
2014-07-12 14:49:38 +04:00
void __dispatch_internal_ # # width ( int cpu ) \
2014-07-12 14:49:35 +04:00
{ \
u32 pending [ width / 32 ] ; \
unsigned int src , tgt ; \
bool irqs_pending = false ; \
2014-07-12 14:49:38 +04:00
static unsigned int i [ 2 ] ; \
unsigned int * next = & i [ cpu ] ; \
2014-07-12 14:49:39 +04:00
unsigned long flags ; \
2014-07-12 14:49:35 +04:00
\
/* read registers in reverse order */ \
2014-07-12 14:49:39 +04:00
spin_lock_irqsave ( & ipic_lock , flags ) ; \
2014-07-12 14:49:35 +04:00
for ( src = 0 , tgt = ( width / 32 ) ; src < ( width / 32 ) ; src + + ) { \
u32 val ; \
\
2014-07-12 14:49:38 +04:00
val = bcm_readl ( irq_stat_addr [ cpu ] + src * sizeof ( u32 ) ) ; \
val & = bcm_readl ( irq_mask_addr [ cpu ] + src * sizeof ( u32 ) ) ; \
2014-07-12 14:49:35 +04:00
pending [ - - tgt ] = val ; \
\
if ( val ) \
irqs_pending = true ; \
} \
2014-07-12 14:49:39 +04:00
spin_unlock_irqrestore ( & ipic_lock , flags ) ; \
2014-07-12 14:49:35 +04:00
\
if ( ! irqs_pending ) \
return ; \
\
while ( 1 ) { \
2014-07-12 14:49:38 +04:00
unsigned int to_call = * next ; \
2014-07-12 14:49:35 +04:00
\
2014-07-12 14:49:38 +04:00
* next = ( * next + 1 ) & ( width - 1 ) ; \
2014-07-12 14:49:35 +04:00
if ( pending [ to_call / 32 ] & ( 1 < < ( to_call & 0x1f ) ) ) { \
handle_internal ( to_call ) ; \
break ; \
} \
} \
} \
\
2014-07-12 14:49:41 +04:00
static void __internal_irq_mask_ # # width ( struct irq_data * d ) \
2014-07-12 14:49:35 +04:00
{ \
u32 val ; \
2014-07-12 14:49:41 +04:00
unsigned irq = d - > irq - IRQ_INTERNAL_BASE ; \
2014-07-12 14:49:35 +04:00
unsigned reg = ( irq / 32 ) ^ ( width / 32 - 1 ) ; \
unsigned bit = irq & 0x1f ; \
2014-07-12 14:49:39 +04:00
unsigned long flags ; \
2014-07-12 14:49:40 +04:00
int cpu ; \
2014-07-12 14:49:35 +04:00
\
2014-07-12 14:49:39 +04:00
spin_lock_irqsave ( & ipic_lock , flags ) ; \
2014-07-12 14:49:40 +04:00
for_each_present_cpu ( cpu ) { \
if ( ! irq_mask_addr [ cpu ] ) \
break ; \
\
val = bcm_readl ( irq_mask_addr [ cpu ] + reg * sizeof ( u32 ) ) ; \
val & = ~ ( 1 < < bit ) ; \
bcm_writel ( val , irq_mask_addr [ cpu ] + reg * sizeof ( u32 ) ) ; \
} \
2014-07-12 14:49:39 +04:00
spin_unlock_irqrestore ( & ipic_lock , flags ) ; \
2014-07-12 14:49:35 +04:00
} \
\
2014-07-12 14:49:42 +04:00
static void __internal_irq_unmask_ # # width ( struct irq_data * d , \
const struct cpumask * m ) \
2014-07-12 14:49:35 +04:00
{ \
u32 val ; \
2014-07-12 14:49:41 +04:00
unsigned irq = d - > irq - IRQ_INTERNAL_BASE ; \
2014-07-12 14:49:35 +04:00
unsigned reg = ( irq / 32 ) ^ ( width / 32 - 1 ) ; \
unsigned bit = irq & 0x1f ; \
2014-07-12 14:49:39 +04:00
unsigned long flags ; \
2014-07-12 14:49:40 +04:00
int cpu ; \
2014-07-12 14:49:35 +04:00
\
2014-07-12 14:49:39 +04:00
spin_lock_irqsave ( & ipic_lock , flags ) ; \
2014-07-12 14:49:40 +04:00
for_each_present_cpu ( cpu ) { \
if ( ! irq_mask_addr [ cpu ] ) \
break ; \
\
val = bcm_readl ( irq_mask_addr [ cpu ] + reg * sizeof ( u32 ) ) ; \
2014-07-12 14:49:42 +04:00
if ( enable_irq_for_cpu ( cpu , d , m ) ) \
2014-07-12 14:49:40 +04:00
val | = ( 1 < < bit ) ; \
else \
val & = ~ ( 1 < < bit ) ; \
bcm_writel ( val , irq_mask_addr [ cpu ] + reg * sizeof ( u32 ) ) ; \
} \
2014-07-12 14:49:39 +04:00
spin_unlock_irqrestore ( & ipic_lock , flags ) ; \
2009-08-18 16:23:37 +04:00
}
2014-07-12 14:49:35 +04:00
BUILD_IPIC_INTERNAL ( 32 ) ;
BUILD_IPIC_INTERNAL ( 64 ) ;
2011-11-04 22:09:33 +04:00
2009-08-18 16:23:37 +04:00
asmlinkage void plat_irq_dispatch ( void )
{
u32 cause ;
do {
cause = read_c0_cause ( ) & read_c0_status ( ) & ST0_IM ;
if ( ! cause )
break ;
if ( cause & CAUSEF_IP7 )
do_IRQ ( 7 ) ;
2013-06-03 18:39:34 +04:00
if ( cause & CAUSEF_IP0 )
do_IRQ ( 0 ) ;
if ( cause & CAUSEF_IP1 )
do_IRQ ( 1 ) ;
2009-08-18 16:23:37 +04:00
if ( cause & CAUSEF_IP2 )
2014-07-12 14:49:38 +04:00
dispatch_internal ( 0 ) ;
2014-07-12 14:49:40 +04:00
if ( is_ext_irq_cascaded ) {
if ( cause & CAUSEF_IP3 )
dispatch_internal ( 1 ) ;
} else {
2011-11-04 22:09:32 +04:00
if ( cause & CAUSEF_IP3 )
do_IRQ ( IRQ_EXT_0 ) ;
if ( cause & CAUSEF_IP4 )
do_IRQ ( IRQ_EXT_1 ) ;
if ( cause & CAUSEF_IP5 )
do_IRQ ( IRQ_EXT_2 ) ;
if ( cause & CAUSEF_IP6 )
do_IRQ ( IRQ_EXT_3 ) ;
}
2009-08-18 16:23:37 +04:00
} while ( 1 ) ;
}
/*
 * internal IRQs operations: only mask/unmask on PERF irq mask
 * register.
 */
2011-11-04 22:09:32 +04:00
/* irq_chip .irq_mask callback: defer to the width-specific handler. */
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
	internal_irq_mask(d);
}
static void bcm63xx_internal_irq_unmask ( struct irq_data * d )
{
2014-07-12 14:49:42 +04:00
internal_irq_unmask ( d , NULL ) ;
2011-11-04 22:09:32 +04:00
}
2009-08-18 16:23:37 +04:00
/*
 * external IRQs operations: mask/unmask and clear on PERF external
 * irq control register.
 */
2011-03-24 00:08:47 +03:00
static void bcm63xx_external_irq_mask ( struct irq_data * d )
2009-08-18 16:23:37 +04:00
{
2011-11-04 22:09:32 +04:00
unsigned int irq = d - > irq - IRQ_EXTERNAL_BASE ;
2011-11-04 22:09:34 +04:00
u32 reg , regaddr ;
2014-07-12 14:49:39 +04:00
unsigned long flags ;
2009-08-18 16:23:37 +04:00
2011-11-04 22:09:34 +04:00
regaddr = get_ext_irq_perf_reg ( irq ) ;
2014-07-12 14:49:39 +04:00
spin_lock_irqsave ( & epic_lock , flags ) ;
2011-11-04 22:09:34 +04:00
reg = bcm_perf_readl ( regaddr ) ;
if ( BCMCPU_IS_6348 ( ) )
reg & = ~ EXTIRQ_CFG_MASK_6348 ( irq % 4 ) ;
else
reg & = ~ EXTIRQ_CFG_MASK ( irq % 4 ) ;
bcm_perf_writel ( reg , regaddr ) ;
2014-07-12 14:49:39 +04:00
spin_unlock_irqrestore ( & epic_lock , flags ) ;
2011-11-04 22:09:32 +04:00
if ( is_ext_irq_cascaded )
2014-07-12 14:49:41 +04:00
internal_irq_mask ( irq_get_irq_data ( irq + ext_irq_start ) ) ;
2009-08-18 16:23:37 +04:00
}
2011-03-24 00:08:47 +03:00
static void bcm63xx_external_irq_unmask ( struct irq_data * d )
2009-08-18 16:23:37 +04:00
{
2011-11-04 22:09:32 +04:00
unsigned int irq = d - > irq - IRQ_EXTERNAL_BASE ;
2011-11-04 22:09:34 +04:00
u32 reg , regaddr ;
2014-07-12 14:49:39 +04:00
unsigned long flags ;
2011-11-04 22:09:34 +04:00
regaddr = get_ext_irq_perf_reg ( irq ) ;
2014-07-12 14:49:39 +04:00
spin_lock_irqsave ( & epic_lock , flags ) ;
2011-11-04 22:09:34 +04:00
reg = bcm_perf_readl ( regaddr ) ;
if ( BCMCPU_IS_6348 ( ) )
reg | = EXTIRQ_CFG_MASK_6348 ( irq % 4 ) ;
else
reg | = EXTIRQ_CFG_MASK ( irq % 4 ) ;
bcm_perf_writel ( reg , regaddr ) ;
2014-07-12 14:49:39 +04:00
spin_unlock_irqrestore ( & epic_lock , flags ) ;
2009-08-18 16:23:37 +04:00
2011-11-04 22:09:32 +04:00
if ( is_ext_irq_cascaded )
2014-07-12 14:49:42 +04:00
internal_irq_unmask ( irq_get_irq_data ( irq + ext_irq_start ) ,
NULL ) ;
2009-08-18 16:23:37 +04:00
}
2011-03-24 00:08:47 +03:00
static void bcm63xx_external_irq_clear ( struct irq_data * d )
2009-08-18 16:23:37 +04:00
{
2011-11-04 22:09:32 +04:00
unsigned int irq = d - > irq - IRQ_EXTERNAL_BASE ;
2011-11-04 22:09:34 +04:00
u32 reg , regaddr ;
2014-07-12 14:49:39 +04:00
unsigned long flags ;
2011-11-04 22:09:34 +04:00
regaddr = get_ext_irq_perf_reg ( irq ) ;
2014-07-12 14:49:39 +04:00
spin_lock_irqsave ( & epic_lock , flags ) ;
2011-11-04 22:09:34 +04:00
reg = bcm_perf_readl ( regaddr ) ;
2009-08-18 16:23:37 +04:00
2011-11-04 22:09:34 +04:00
if ( BCMCPU_IS_6348 ( ) )
reg | = EXTIRQ_CFG_CLEAR_6348 ( irq % 4 ) ;
else
reg | = EXTIRQ_CFG_CLEAR ( irq % 4 ) ;
bcm_perf_writel ( reg , regaddr ) ;
2014-07-12 14:49:39 +04:00
spin_unlock_irqrestore ( & epic_lock , flags ) ;
2009-08-18 16:23:37 +04:00
}
2011-03-24 00:08:47 +03:00
static int bcm63xx_external_irq_set_type ( struct irq_data * d ,
2009-08-18 16:23:37 +04:00
unsigned int flow_type )
{
2011-11-04 22:09:32 +04:00
unsigned int irq = d - > irq - IRQ_EXTERNAL_BASE ;
2011-11-04 22:09:34 +04:00
u32 reg , regaddr ;
int levelsense , sense , bothedge ;
2014-07-12 14:49:39 +04:00
unsigned long flags ;
2009-08-18 16:23:37 +04:00
flow_type & = IRQ_TYPE_SENSE_MASK ;
if ( flow_type = = IRQ_TYPE_NONE )
flow_type = IRQ_TYPE_LEVEL_LOW ;
2011-11-04 22:09:34 +04:00
levelsense = sense = bothedge = 0 ;
2009-08-18 16:23:37 +04:00
switch ( flow_type ) {
case IRQ_TYPE_EDGE_BOTH :
2011-11-04 22:09:34 +04:00
bothedge = 1 ;
2009-08-18 16:23:37 +04:00
break ;
case IRQ_TYPE_EDGE_RISING :
2011-11-04 22:09:34 +04:00
sense = 1 ;
2009-08-18 16:23:37 +04:00
break ;
case IRQ_TYPE_EDGE_FALLING :
break ;
case IRQ_TYPE_LEVEL_HIGH :
2011-11-04 22:09:34 +04:00
levelsense = 1 ;
sense = 1 ;
2009-08-18 16:23:37 +04:00
break ;
case IRQ_TYPE_LEVEL_LOW :
2011-11-04 22:09:34 +04:00
levelsense = 1 ;
2009-08-18 16:23:37 +04:00
break ;
default :
2015-10-14 14:27:38 +03:00
pr_err ( " bogus flow type combination given ! \n " ) ;
2009-08-18 16:23:37 +04:00
return - EINVAL ;
}
2011-11-04 22:09:34 +04:00
regaddr = get_ext_irq_perf_reg ( irq ) ;
2014-07-12 14:49:39 +04:00
spin_lock_irqsave ( & epic_lock , flags ) ;
2011-11-04 22:09:34 +04:00
reg = bcm_perf_readl ( regaddr ) ;
irq % = 4 ;
2012-07-13 11:46:05 +04:00
switch ( bcm63xx_get_cpu_id ( ) ) {
case BCM6348_CPU_ID :
2011-11-04 22:09:34 +04:00
if ( levelsense )
reg | = EXTIRQ_CFG_LEVELSENSE_6348 ( irq ) ;
else
reg & = ~ EXTIRQ_CFG_LEVELSENSE_6348 ( irq ) ;
if ( sense )
reg | = EXTIRQ_CFG_SENSE_6348 ( irq ) ;
else
reg & = ~ EXTIRQ_CFG_SENSE_6348 ( irq ) ;
if ( bothedge )
reg | = EXTIRQ_CFG_BOTHEDGE_6348 ( irq ) ;
else
reg & = ~ EXTIRQ_CFG_BOTHEDGE_6348 ( irq ) ;
2012-07-13 11:46:05 +04:00
break ;
2011-11-04 22:09:34 +04:00
2013-06-18 20:55:40 +04:00
case BCM3368_CPU_ID :
2012-07-13 11:46:05 +04:00
case BCM6328_CPU_ID :
case BCM6338_CPU_ID :
case BCM6345_CPU_ID :
case BCM6358_CPU_ID :
2013-03-21 18:03:17 +04:00
case BCM6362_CPU_ID :
2012-07-13 11:46:05 +04:00
case BCM6368_CPU_ID :
2011-11-04 22:09:34 +04:00
if ( levelsense )
reg | = EXTIRQ_CFG_LEVELSENSE ( irq ) ;
else
reg & = ~ EXTIRQ_CFG_LEVELSENSE ( irq ) ;
if ( sense )
reg | = EXTIRQ_CFG_SENSE ( irq ) ;
else
reg & = ~ EXTIRQ_CFG_SENSE ( irq ) ;
if ( bothedge )
reg | = EXTIRQ_CFG_BOTHEDGE ( irq ) ;
else
reg & = ~ EXTIRQ_CFG_BOTHEDGE ( irq ) ;
2012-07-13 11:46:05 +04:00
break ;
default :
BUG ( ) ;
2011-11-04 22:09:34 +04:00
}
bcm_perf_writel ( reg , regaddr ) ;
2014-07-12 14:49:39 +04:00
spin_unlock_irqrestore ( & epic_lock , flags ) ;
2009-08-18 16:23:37 +04:00
2011-03-24 00:08:47 +03:00
irqd_set_trigger_type ( d , flow_type ) ;
if ( flow_type & ( IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH ) )
2015-07-13 23:46:02 +03:00
irq_set_handler_locked ( d , handle_level_irq ) ;
2011-03-24 00:08:47 +03:00
else
2015-07-13 23:46:02 +03:00
irq_set_handler_locked ( d , handle_edge_irq ) ;
2009-08-18 16:23:37 +04:00
2011-03-24 00:08:47 +03:00
return IRQ_SET_MASK_OK_NOCOPY ;
2009-08-18 16:23:37 +04:00
}
2014-07-12 14:49:42 +04:00
#ifdef CONFIG_SMP
/*
 * irq_chip .irq_set_affinity callback: re-program the per-cpu mask
 * registers for the new destination mask, but only while the irq is
 * enabled (a disabled irq picks up its affinity on the next unmask).
 */
static int bcm63xx_internal_set_affinity(struct irq_data *data,
					 const struct cpumask *dest,
					 bool force)
{
	if (!irqd_irq_disabled(data))
		internal_irq_unmask(data, dest);

	return 0;
}
#endif
2009-08-18 16:23:37 +04:00
static struct irq_chip bcm63xx_internal_irq_chip = {
. name = " bcm63xx_ipic " ,
2011-03-24 00:08:47 +03:00
. irq_mask = bcm63xx_internal_irq_mask ,
. irq_unmask = bcm63xx_internal_irq_unmask ,
2009-08-18 16:23:37 +04:00
} ;
static struct irq_chip bcm63xx_external_irq_chip = {
. name = " bcm63xx_epic " ,
2011-03-24 00:08:47 +03:00
. irq_ack = bcm63xx_external_irq_clear ,
2009-08-18 16:23:37 +04:00
2011-03-24 00:08:47 +03:00
. irq_mask = bcm63xx_external_irq_mask ,
. irq_unmask = bcm63xx_external_irq_unmask ,
2009-08-18 16:23:37 +04:00
2011-03-24 00:08:47 +03:00
. irq_set_type = bcm63xx_external_irq_set_type ,
2009-08-18 16:23:37 +04:00
} ;
static struct irqaction cpu_ip2_cascade_action = {
. handler = no_action ,
. name = " cascade_ip2 " ,
2011-07-23 16:41:24 +04:00
. flags = IRQF_NO_THREAD ,
2009-08-18 16:23:37 +04:00
} ;
2014-07-12 14:49:40 +04:00
# ifdef CONFIG_SMP
static struct irqaction cpu_ip3_cascade_action = {
. handler = no_action ,
. name = " cascade_ip3 " ,
. flags = IRQF_NO_THREAD ,
} ;
# endif
2011-11-04 22:09:32 +04:00
static struct irqaction cpu_ext_cascade_action = {
. handler = no_action ,
. name = " cascade_extirq " ,
. flags = IRQF_NO_THREAD ,
} ;
2014-07-12 14:49:34 +04:00
static void bcm63xx_init_irq ( void )
{
int irq_bits ;
2014-07-12 14:49:36 +04:00
irq_stat_addr [ 0 ] = bcm63xx_regset_address ( RSET_PERF ) ;
irq_mask_addr [ 0 ] = bcm63xx_regset_address ( RSET_PERF ) ;
2014-07-12 14:49:37 +04:00
irq_stat_addr [ 1 ] = bcm63xx_regset_address ( RSET_PERF ) ;
irq_mask_addr [ 1 ] = bcm63xx_regset_address ( RSET_PERF ) ;
2014-07-12 14:49:34 +04:00
switch ( bcm63xx_get_cpu_id ( ) ) {
case BCM3368_CPU_ID :
2014-07-12 14:49:36 +04:00
irq_stat_addr [ 0 ] + = PERF_IRQSTAT_3368_REG ;
irq_mask_addr [ 0 ] + = PERF_IRQMASK_3368_REG ;
2014-07-12 14:49:37 +04:00
irq_stat_addr [ 1 ] = 0 ;
2014-08-23 22:33:25 +04:00
irq_mask_addr [ 1 ] = 0 ;
2014-07-12 14:49:34 +04:00
irq_bits = 32 ;
ext_irq_count = 4 ;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368 ;
break ;
case BCM6328_CPU_ID :
2014-07-12 14:49:36 +04:00
irq_stat_addr [ 0 ] + = PERF_IRQSTAT_6328_REG ( 0 ) ;
irq_mask_addr [ 0 ] + = PERF_IRQMASK_6328_REG ( 0 ) ;
2014-07-12 14:49:37 +04:00
irq_stat_addr [ 1 ] + = PERF_IRQSTAT_6328_REG ( 1 ) ;
2014-08-23 22:33:25 +04:00
irq_mask_addr [ 1 ] + = PERF_IRQMASK_6328_REG ( 1 ) ;
2014-07-12 14:49:34 +04:00
irq_bits = 64 ;
ext_irq_count = 4 ;
is_ext_irq_cascaded = 1 ;
ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE ;
ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE ;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328 ;
break ;
case BCM6338_CPU_ID :
2014-07-12 14:49:36 +04:00
irq_stat_addr [ 0 ] + = PERF_IRQSTAT_6338_REG ;
irq_mask_addr [ 0 ] + = PERF_IRQMASK_6338_REG ;
2014-07-12 14:49:37 +04:00
irq_stat_addr [ 1 ] = 0 ;
irq_mask_addr [ 1 ] = 0 ;
2014-07-12 14:49:34 +04:00
irq_bits = 32 ;
ext_irq_count = 4 ;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338 ;
break ;
case BCM6345_CPU_ID :
2014-07-12 14:49:36 +04:00
irq_stat_addr [ 0 ] + = PERF_IRQSTAT_6345_REG ;
irq_mask_addr [ 0 ] + = PERF_IRQMASK_6345_REG ;
2014-07-12 14:49:37 +04:00
irq_stat_addr [ 1 ] = 0 ;
irq_mask_addr [ 1 ] = 0 ;
2014-07-12 14:49:34 +04:00
irq_bits = 32 ;
ext_irq_count = 4 ;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345 ;
break ;
case BCM6348_CPU_ID :
2014-07-12 14:49:36 +04:00
irq_stat_addr [ 0 ] + = PERF_IRQSTAT_6348_REG ;
irq_mask_addr [ 0 ] + = PERF_IRQMASK_6348_REG ;
2014-07-12 14:49:37 +04:00
irq_stat_addr [ 1 ] = 0 ;
irq_mask_addr [ 1 ] = 0 ;
2014-07-12 14:49:34 +04:00
irq_bits = 32 ;
ext_irq_count = 4 ;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348 ;
break ;
case BCM6358_CPU_ID :
2014-07-12 14:49:36 +04:00
irq_stat_addr [ 0 ] + = PERF_IRQSTAT_6358_REG ( 0 ) ;
irq_mask_addr [ 0 ] + = PERF_IRQMASK_6358_REG ( 0 ) ;
2014-07-12 14:49:37 +04:00
irq_stat_addr [ 1 ] + = PERF_IRQSTAT_6358_REG ( 1 ) ;
irq_mask_addr [ 1 ] + = PERF_IRQMASK_6358_REG ( 1 ) ;
2014-07-12 14:49:34 +04:00
irq_bits = 32 ;
ext_irq_count = 4 ;
is_ext_irq_cascaded = 1 ;
ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE ;
ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE ;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358 ;
break ;
case BCM6362_CPU_ID :
2014-07-12 14:49:36 +04:00
irq_stat_addr [ 0 ] + = PERF_IRQSTAT_6362_REG ( 0 ) ;
irq_mask_addr [ 0 ] + = PERF_IRQMASK_6362_REG ( 0 ) ;
2014-07-12 14:49:37 +04:00
irq_stat_addr [ 1 ] + = PERF_IRQSTAT_6362_REG ( 1 ) ;
irq_mask_addr [ 1 ] + = PERF_IRQMASK_6362_REG ( 1 ) ;
2014-07-12 14:49:34 +04:00
irq_bits = 64 ;
ext_irq_count = 4 ;
is_ext_irq_cascaded = 1 ;
ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE ;
ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE ;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362 ;
break ;
case BCM6368_CPU_ID :
2014-07-12 14:49:36 +04:00
irq_stat_addr [ 0 ] + = PERF_IRQSTAT_6368_REG ( 0 ) ;
irq_mask_addr [ 0 ] + = PERF_IRQMASK_6368_REG ( 0 ) ;
2014-07-12 14:49:37 +04:00
irq_stat_addr [ 1 ] + = PERF_IRQSTAT_6368_REG ( 1 ) ;
irq_mask_addr [ 1 ] + = PERF_IRQMASK_6368_REG ( 1 ) ;
2014-07-12 14:49:34 +04:00
irq_bits = 64 ;
ext_irq_count = 6 ;
is_ext_irq_cascaded = 1 ;
ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE ;
ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE ;
ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368 ;
ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368 ;
break ;
default :
BUG ( ) ;
}
if ( irq_bits = = 32 ) {
dispatch_internal = __dispatch_internal_32 ;
internal_irq_mask = __internal_irq_mask_32 ;
internal_irq_unmask = __internal_irq_unmask_32 ;
} else {
dispatch_internal = __dispatch_internal_64 ;
internal_irq_mask = __internal_irq_mask_64 ;
internal_irq_unmask = __internal_irq_unmask_64 ;
}
}
2009-08-18 16:23:37 +04:00
/*
 * Architecture irq bring-up: detect the SoC layout, register the MIPS cpu
 * irq chip, attach the internal/external irq chips to their irq ranges,
 * and wire up the cascade actions.  On SMP with cascaded external irqs,
 * also install the affinity callback and pin default affinity to the boot
 * cpu.
 */
void __init arch_init_irq(void)
{
	int i;

	bcm63xx_init_irq();
	mips_cpu_irq_init();
	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);

	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
					 handle_edge_irq);

	if (!is_ext_irq_cascaded) {
		/* external irqs sit directly on CP0 IP3..IP6 */
		for (i = 3; i < 3 + ext_irq_count; ++i)
			setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action);
	}

	setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
#ifdef CONFIG_SMP
	if (is_ext_irq_cascaded) {
		setup_irq(MIPS_CPU_IRQ_BASE + 3, &cpu_ip3_cascade_action);
		bcm63xx_internal_irq_chip.irq_set_affinity =
			bcm63xx_internal_set_affinity;

		cpumask_clear(irq_default_affinity);
		cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	}
#endif
}