/*
 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>

#include <asm/errno.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/io.h>

#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
#include <asm/sibyte/sb1250_uart.h>
#include <asm/sibyte/sb1250_scd.h>
#include <asm/sibyte/sb1250.h>
/*
 * These are the routines that handle all the low level interrupt stuff.
 * Actions handled here are: initialization of the interrupt map, requesting
 * of interrupt lines by handlers, dispatching of interrupts to handlers,
 * probing for interrupt lines.
 */
static void end_sb1250_irq(unsigned int irq);
static void enable_sb1250_irq(unsigned int irq);
static void disable_sb1250_irq(unsigned int irq);
static void ack_sb1250_irq(unsigned int irq);
#ifdef CONFIG_SMP
static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask);
#endif

#ifdef CONFIG_SIBYTE_HAS_LDT
extern unsigned long ldt_eoi_space;
#endif
static struct irq_chip sb1250_irq_type = {
	.name = "SB1250-IMR",
	.ack = ack_sb1250_irq,
	.mask = disable_sb1250_irq,
	.mask_ack = ack_sb1250_irq,
	.unmask = enable_sb1250_irq,
	.end = end_sb1250_irq,
#ifdef CONFIG_SMP
	.set_affinity = sb1250_set_affinity
#endif
};
/* Store the CPU id (not the logical number) */
int sb1250_irq_owner[SB1250_NR_IRQS];

DEFINE_SPINLOCK(sb1250_imr_lock);
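
/*
 * Set or clear this IRQ's bit in the interrupt mask register of the given
 * CPU.  The read-modify-write of the IMR is serialized by sb1250_imr_lock,
 * so it cannot race with a concurrent affinity change.
 */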
void sb1250_mask_irq(int cpu, int irq)
{
	unsigned long flags;
	u64 cur_ints;

	spin_lock_irqsave(&sb1250_imr_lock, flags);
	cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	cur_ints |= (((u64) 1) << irq);
	____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	spin_unlock_irqrestore(&sb1250_imr_lock, flags);
}

void sb1250_unmask_irq(int cpu, int irq)
{
	unsigned long flags;
	u64 cur_ints;

	spin_lock_irqsave(&sb1250_imr_lock, flags);
	cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	cur_ints &= ~(((u64) 1) << irq);
	____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	spin_unlock_irqrestore(&sb1250_imr_lock, flags);
}
#ifdef CONFIG_SMP
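/*
 * Move an IRQ from one CPU's IMR to another's.  The bit is first masked
 * on the old owner, ownership is switched, and the bit is unmasked on the
 * new owner only if it was enabled before the move.
 */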
static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	int i = 0, old_cpu, cpu, int_on;
	u64 cur_ints;
	unsigned long flags;

	i = cpumask_first(mask);

	/* Convert logical CPU to physical CPU */
	cpu = cpu_logical_map(i);

	/* Protect against other affinity changers and IMR manipulation */
	spin_lock_irqsave(&sb1250_imr_lock, flags);

	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
	old_cpu = sb1250_irq_owner[irq];
	cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(old_cpu) +
					R_IMR_INTERRUPT_MASK));
	int_on = !(cur_ints & (((u64) 1) << irq));
	if (int_on) {
		/* If it was on, mask it */
		cur_ints |= (((u64) 1) << irq);
		____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(old_cpu) +
						R_IMR_INTERRUPT_MASK));
	}
	sb1250_irq_owner[irq] = cpu;
	if (int_on) {
		/* unmask for the new CPU */
		cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
						R_IMR_INTERRUPT_MASK));
		cur_ints &= ~(((u64) 1) << irq);
		____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
						R_IMR_INTERRUPT_MASK));
	}
	spin_unlock_irqrestore(&sb1250_imr_lock, flags);

	return 0;
}
#endif
/*****************************************************************************/
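
/*
 * The generic disable/enable hooks simply mask or unmask the interrupt
 * on whichever CPU currently owns it.
 */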
static void disable_sb1250_irq(unsigned int irq)
{
	sb1250_mask_irq(sb1250_irq_owner[irq], irq);
}

static void enable_sb1250_irq(unsigned int irq)
{
	sb1250_unmask_irq(sb1250_irq_owner[irq], irq);
}
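
/*
 * Acknowledge an interrupt.  For HT-delivered (LDT) interrupts the pending
 * status is cleared on every CPU and an EOI is generated; the source is
 * then masked until it is unmasked again once handling completes.
 */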
static void ack_sb1250_irq(unsigned int irq)
{
#ifdef CONFIG_SIBYTE_HAS_LDT
	u64 pending;

	/*
	 * If the interrupt was an HT interrupt, now is the time to
	 * clear it.  NOTE: we assume the HT bridge was set up to
	 * deliver the interrupts to all CPUs (which makes affinity
	 * changing easier for us)
	 */
	pending = __raw_readq(IOADDR(A_IMR_REGISTER(sb1250_irq_owner[irq],
						    R_IMR_LDT_INTERRUPT)));
	pending &= ((u64) 1 << (irq));
	if (pending) {
		int i;
		for (i = 0; i < NR_CPUS; i++) {
			int cpu;
#ifdef CONFIG_SMP
			cpu = cpu_logical_map(i);
#else
			cpu = i;
#endif
			/*
			 * Clear for all CPUs so an affinity switch
			 * doesn't find an old status
			 */
			__raw_writeq(pending,
				     IOADDR(A_IMR_REGISTER(cpu,
						R_IMR_LDT_INTERRUPT_CLR)));
		}

		/*
		 * Generate EOI.  For Pass 1 parts, EOI is a nop.  For
		 * Pass 2, the LDT world may be edge-triggered, but
		 * this EOI shouldn't hurt.  If they are
		 * level-sensitive, the EOI is required.
		 */
		*(uint32_t *)(ldt_eoi_space + (irq << 16) + (7 << 2)) = 0;
	}
#endif
	sb1250_mask_irq(sb1250_irq_owner[irq], irq);
}

static void end_sb1250_irq(unsigned int irq)
{
	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
		sb1250_unmask_irq(sb1250_irq_owner[irq], irq);
	}
}
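
/*
 * Register the IMR irq_chip for every SB1250 interrupt source, using the
 * level-triggered flow handler, and default ownership to CPU 0.
 */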
void __init init_sb1250_irqs(void)
{
	int i;

	for (i = 0; i < SB1250_NR_IRQS; i++) {
		set_irq_chip_and_handler(i, &sb1250_irq_type, handle_level_irq);
		sb1250_irq_owner[i] = 0;
	}
}

/*
 * arch_init_irq is called early in the boot sequence from init/main.c via
 * init_IRQ.  It is responsible for setting up the interrupt mapper and
 * installing the handler that will be responsible for dispatching interrupts
 * to the "right" place.
 */
/*
 * For now, map all interrupts to IP[2].  We could save
 * some cycles by parceling out system interrupts to different
 * IP lines, but keep it simple for bringup.  We'll also direct
 * all interrupts to a single CPU; we should probably route
 * PCI and LDT to one cpu and everything else to the other
 * to balance the load a bit.
 *
 * On the second cpu, everything is set to IP5, which is
 * ignored, EXCEPT the mailbox interrupt.  That one is
 * set to IP[2] so it is handled.  This is needed so we
 * can do cross-cpu function calls, as required by SMP.
 */
#define IMR_IP2_VAL	K_INT_MAP_I0
#define IMR_IP3_VAL	K_INT_MAP_I1
#define IMR_IP4_VAL	K_INT_MAP_I2
#define IMR_IP5_VAL	K_INT_MAP_I3
#define IMR_IP6_VAL	K_INT_MAP_I4

void __init arch_init_irq(void)
{
	unsigned int i;
	u64 tmp;
	unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
		STATUSF_IP1 | STATUSF_IP0;

	/* Default everything to IP2 */
	for (i = 0; i < SB1250_NR_IRQS; i++) {	/* was I0 */
		__raw_writeq(IMR_IP2_VAL,
			     IOADDR(A_IMR_REGISTER(0,
						   R_IMR_INTERRUPT_MAP_BASE) +
				    (i << 3)));
		__raw_writeq(IMR_IP2_VAL,
			     IOADDR(A_IMR_REGISTER(1,
						   R_IMR_INTERRUPT_MAP_BASE) +
				    (i << 3)));
	}

	init_sb1250_irqs();

	/*
	 * Map the high 16 bits of the mailbox registers to IP[3], for
	 * inter-cpu messages
	 */
	/* Was I1 */
	__raw_writeq(IMR_IP3_VAL,
		     IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) +
			    (K_INT_MBOX_0 << 3)));
	__raw_writeq(IMR_IP3_VAL,
		     IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MAP_BASE) +
			    (K_INT_MBOX_0 << 3)));

	/* Clear the mailboxes.  The firmware may leave them dirty */
	__raw_writeq(0xffffffffffffffffULL,
		     IOADDR(A_IMR_REGISTER(0, R_IMR_MAILBOX_CLR_CPU)));
	__raw_writeq(0xffffffffffffffffULL,
		     IOADDR(A_IMR_REGISTER(1, R_IMR_MAILBOX_CLR_CPU)));

	/* Mask everything except the mailbox registers for both cpus */
	tmp = ~((u64) 0) ^ (((u64) 1) << K_INT_MBOX_0);
	__raw_writeq(tmp, IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MASK)));
	__raw_writeq(tmp, IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MASK)));

	/*
	 * Note that the timer interrupts are also mapped, but this is
	 * done in sb1250_time_init().  Also, the profiling driver
	 * does its own management of IP7.
	 */

	/* Enable necessary IPs, disable the rest */
	change_c0_status(ST0_IM, imask);
}

extern void sb1250_mailbox_interrupt(void);

static inline void dispatch_ip2(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long long mask;

	/*
	 * Default...we've hit an IP[2] interrupt, which means we've got to
	 * check the 1250 interrupt registers to figure out what to do.  Need
	 * to detect which CPU we're on, now that smp_affinity is supported.
	 */
	mask = __raw_readq(IOADDR(A_IMR_REGISTER(cpu,
				  R_IMR_INTERRUPT_STATUS_BASE)));
	if (mask)
		do_IRQ(fls64(mask) - 1);
}
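
/*
 * Top-level dispatch: check the pending CP0 cause bits in priority order -
 * IP7 (performance counters), IP4 (per-CPU timer), IP3 (SMP mailbox) and
 * finally IP2, which fans out to the IMR sources via dispatch_ip2().
 */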
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int pending;

	/*
	 * What a pain.  We have to be really careful saving the upper 32 bits
	 * of any register across function calls if we don't want them
	 * trashed--since we're running in -o32, the calling routine never
	 * saves the full 64 bits of a register across a function call.  Being
	 * the interrupt handler, we're guaranteed that interrupts are disabled
	 * during this code so we don't have to worry about random interrupts
	 * blasting the high 32 bits.
	 */
	pending = read_c0_cause() & read_c0_status() & ST0_IM;

	if (pending & CAUSEF_IP7) /* CPU performance counter interrupt */
		do_IRQ(MIPS_CPU_IRQ_BASE + 7);
	else if (pending & CAUSEF_IP4)
		do_IRQ(K_INT_TIMER_0 + cpu);	/* sb1250_timer_interrupt() */
#ifdef CONFIG_SMP
	else if (pending & CAUSEF_IP3)
		sb1250_mailbox_interrupt();
#endif
	else if (pending & CAUSEF_IP2)
		dispatch_ip2();
	else
		spurious_interrupt();
}