/*
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
 * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
 * Copyright (C) 1999-2000 Grant Grundler
 * Copyright (c) 2005 Matthew Wilcox
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/io.h>

#include <asm/smp.h>

#undef PARISC_IRQ_CR16_COUNTS

extern irqreturn_t timer_interrupt(int, void *);
extern irqreturn_t ipi_interrupt(int, void *);

#define EIEM_MASK(irq)	(1UL << (CPU_IRQ_MAX - irq))

/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/
static volatile unsigned long cpu_eiem = 0;
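
/*
 * For example: the PA-RISC interval timer is architected as EIR bit 0,
 * i.e. the *most* significant bit of the word.  Assuming
 * TIMER_IRQ == CPU_IRQ_BASE and
 * CPU_IRQ_MAX == CPU_IRQ_BASE + (BITS_PER_LONG - 1), the macro above
 * gives:
 *
 *	EIEM_MASK(TIMER_IRQ)   == 1UL << (BITS_PER_LONG - 1)  (HW bit 0)
 *	EIEM_MASK(CPU_IRQ_MAX) == 1UL << 0
 *
 * eirr_to_irq() further down performs the inverse mapping.
 */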

/*
** local ACK bitmap ... habitually set to 1, but reset to zero
** between ->ack() and ->eoi() of the interrupt to prevent
** re-interruption of a processing interrupt.
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;

static void cpu_mask_irq(struct irq_data *d)
{
	unsigned long eirr_bit = EIEM_MASK(d->irq);

	cpu_eiem &= ~eirr_bit;
	/* Do nothing on the other CPUs.  If they get this interrupt,
	 * the & cpu_eiem in do_cpu_irq_mask() ensures they won't
	 * handle it, and the set_eiem() at the bottom will ensure it
	 * then gets disabled */
}

static void __cpu_unmask_irq(unsigned int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem |= eirr_bit;

	/* This is just a simple NOP IPI.  But what it does is cause
	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
	 * of the interrupt handler */
	smp_send_all_nop();
}

static void cpu_unmask_irq(struct irq_data *d)
{
	__cpu_unmask_irq(d->irq);
}

void cpu_ack_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* Clear in EIEM so we can no longer process */
	per_cpu(local_ack_eiem, cpu) &= ~mask;

	/* disable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));

	/* and now ack it */
	mtctl(mask, 23);
}

void cpu_eoi_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* set it in the eiems---it's no longer in process */
	per_cpu(local_ack_eiem, cpu) |= mask;

	/* enable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}
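
/*
 * How the two halves pair up under handle_percpu_irq(), sketched for a
 * single interrupt arriving on one CPU (illustrative, not built):
 *
 *	cpu_ack_irq(d);		// drop bit from local_ack_eiem, ack EIRR
 *	... action handlers run; a re-arrival of this IRQ is held off
 *	    because EIEM no longer contains its bit ...
 *	cpu_eoi_irq(d);		// restore the bit, re-enable delivery
 */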

#ifdef CONFIG_SMP
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
	int cpu_dest;

	/* timer and ipi have to always be received on all CPUs */
	if (irqd_is_per_cpu(d))
		return -EINVAL;

	/* whatever mask they set, we just allow one CPU */
	cpu_dest = first_cpu(*dest);

	return cpu_dest;
}

static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
				bool force)
{
	int cpu_dest;

	cpu_dest = cpu_check_affinity(d, dest);
	if (cpu_dest < 0)
		return -1;

	cpumask_copy(d->affinity, dest);

	return 0;
}
#endif
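
/*
 * Example (illustrative): writing "24" (hex, CPUs 2 and 5) to an IRQ's
 * smp_affinity effectively binds it to CPU 2 alone --
 * cpu_check_affinity() picks the first CPU of the mask, since an
 * external interrupt transaction targets a single processor's HPA.
 */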

static struct irq_chip cpu_interrupt_type = {
	.name			= "CPU",
	.irq_mask		= cpu_mask_irq,
	.irq_unmask		= cpu_unmask_irq,
	.irq_ack		= cpu_ack_irq,
	.irq_eoi		= cpu_eoi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= cpu_set_affinity_irq,
#endif
	/* XXX: Needs to be written.  We managed without it so far, but
	 * we really ought to write it.
	 */
	.irq_retrigger		= NULL,
};

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "    ");
		for_each_online_cpu(j)
			seq_printf(p, "       CPU%d", j);

#ifdef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		struct irq_desc *desc = irq_to_desc(i);
		struct irqaction *action;

		raw_spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif

		seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
#ifndef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, "  %s", action->name);
		while ((action = action->next))
			seq_printf(p, ", %s", action->name);
#else
		for ( ; action; action = action->next) {
			unsigned int k, avg, min, max;

			min = max = action->cr16_hist[0];

			for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
				int hist = action->cr16_hist[k];

				if (hist) {
					avg += hist;
				} else
					break;

				if (hist > max) max = hist;
				if (hist < min) min = hist;
			}

			avg /= k;
			seq_printf(p, " %s[%d/%d/%d]", action->name,
				   min, avg, max);
		}
#endif

		seq_putc(p, '\n');
 skip:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/
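
/*
 * Illustrative consumer-side sequence (a sketch, not built here;
 * roughly what an iosapic-style driver does; error handling omitted,
 * and "chip"/"data" are placeholder arguments):
 *
 *	int virt_irq = txn_alloc_irq(8);	// source latches 8 data bits
 *	unsigned long addr = txn_alloc_addr(virt_irq);
 *	unsigned int data = txn_alloc_data(virt_irq);
 *
 *	// program addr/data into the interrupt source, then:
 *	cpu_claim_irq(virt_irq, chip, data);	// hook up chip + handler
 */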

int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
	if (irq_has_action(irq))
		return -EBUSY;
	if (irq_get_chip(irq) != &cpu_interrupt_type)
		return -EBUSY;

	/* for iosapic interrupts */
	if (type) {
		irq_set_chip_and_handler(irq, type, handle_percpu_irq);
		irq_set_chip_data(irq, data);
		__cpu_unmask_irq(irq);
	}
	return 0;
}

int txn_claim_irq(int irq)
{
	return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}

/*
 * The bits_wide parameter accommodates the limitations of the HW/SW which
 * use these bits:
 * Legacy PA I/O (GSC/NIO):	5 bits (architected EIM register)
 * V-class (EPIC):		6 bits
 * N/L/A-class (iosapic):	8 bits
 * PCI 2.2 MSI:			16 bits
 * Some PCI devices:		32 bits (Symbios SCSI/ATM/HyperFabric)
 *
 * On the service provider side:
 * o PA 1.1 (and PA2.0 narrow mode)	5 bits (width of EIR register)
 * o PA 2.0 wide mode			6 bits (per processor)
 * o IA64				8 bits (256 vectors total)
 *
 * So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
 * by the processor...and the N/L-class I/O subsystem supports more bits than
 * PA2.0 has.  The first case is the problem.
 */
int txn_alloc_irq(unsigned int bits_wide)
{
	int irq;

	/* never return irq 0 cause that's the interval timer */
	for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
		if (cpu_claim_irq(irq, NULL, NULL) < 0)
			continue;
		if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
			continue;
		return irq;
	}

	/* unlikely, but be prepared */
	return -1;
}
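
/*
 * For example (illustrative): a legacy GSC device that can only latch
 * 5 transaction data bits calls txn_alloc_irq(5) and can therefore be
 * offered only IRQs CPU_IRQ_BASE+1 .. CPU_IRQ_BASE+31.
 */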

unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
	struct irq_data *d = irq_get_irq_data(irq);
	cpumask_copy(d->affinity, cpumask_of(cpu));
#endif

	return per_cpu(cpu_data, cpu).txn_addr;
}

unsigned long txn_alloc_addr(unsigned int virt_irq)
{
	static int next_cpu = -1;

	next_cpu++; /* assign to "next" CPU we want this bugger on */

	/* validate entry */
	while ((next_cpu < nr_cpu_ids) &&
	       (!per_cpu(cpu_data, next_cpu).txn_addr ||
	        !cpu_online(next_cpu)))
		next_cpu++;

	if (next_cpu >= nr_cpu_ids)
		next_cpu = 0;	/* nothing else, assign monarch */

	return txn_affinity_addr(virt_irq, next_cpu);
}

unsigned int txn_alloc_data(unsigned int virt_irq)
{
	return virt_irq - CPU_IRQ_BASE;
}

static inline int eirr_to_irq(unsigned long eirr)
{
	int bit = fls_long(eirr);
	return (BITS_PER_LONG - bit) + TIMER_IRQ;
}
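
/*
 * Worked example (illustrative): if only the interval timer is pending,
 * eirr has just its MSB set, fls_long() returns BITS_PER_LONG, and the
 * result is TIMER_IRQ -- the exact inverse of EIEM_MASK(TIMER_IRQ).
 * With several bits pending, the lowest-numbered HW bit (most
 * significant C bit) is dispatched first.
 */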

/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned long eirr_val;
	int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	cpumask_t dest;
#endif

	old_regs = set_irq_regs(regs);
	local_irq_disable();
	irq_enter();

	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)
		goto set_out;
	irq = eirr_to_irq(eirr_val);

#ifdef CONFIG_SMP
	desc = irq_to_desc(irq);
	cpumask_copy(&dest, desc->irq_data.affinity);
	if (irqd_is_per_cpu(&desc->irq_data) &&
	    !cpu_isset(smp_processor_id(), dest)) {
		int cpu = first_cpu(dest);

		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
		       irq, smp_processor_id(), cpu);
		gsc_writel(irq + CPU_IRQ_BASE,
			   per_cpu(cpu_data, cpu).hpa);
		goto set_out;
	}
#endif
	generic_handle_irq(irq);

 out:
	irq_exit();
	set_irq_regs(old_regs);
	return;

 set_out:
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
	goto out;
}
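
/*
 * Note (an observation on the flow above): every exit path leaves EIEM
 * equal to cpu_eiem & local_ack_eiem -- either set_out writes it
 * directly, or cpu_ack_irq()/cpu_eoi_irq() wrote it while
 * generic_handle_irq() ran the percpu flow handler.
 */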

static struct irqaction timer_action = {
	.handler = timer_interrupt,
	.name = "timer",
	.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
};

#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
	.handler = ipi_interrupt,
	.name = "IPI",
	.flags = IRQF_DISABLED | IRQF_PERCPU,
};
#endif

static void claim_cpu_irqs(void)
{
	int i;

	for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
		irq_set_chip_and_handler(i, &cpu_interrupt_type,
					 handle_percpu_irq);
	}

	irq_set_handler(TIMER_IRQ, handle_percpu_irq);
	setup_irq(TIMER_IRQ, &timer_action);
#ifdef CONFIG_SMP
	irq_set_handler(IPI_IRQ, handle_percpu_irq);
	setup_irq(IPI_IRQ, &ipi_action);
#endif
}

void __init init_IRQ(void)
{
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
	claim_cpu_irqs();
#ifdef CONFIG_SMP
	if (!cpu_eiem)
		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
#else
	cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
}