/*
 *	linux/arch/ia64/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>, Intel Corporation 2004
 *
 * 4/14/2004: Added code to handle cpu migration and do safe irq
 *	      migration without losing interrupts for iosapic
 *	      architecture.
 */
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n",
	       irq, smp_processor_id());
}
#ifdef CONFIG_IA64_GENERIC
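/*
 * Default irq <-> vector translations for the generic (multi-platform)
 * build; with CONFIG_IA64_GENERIC the platform's machine vector is
 * expected to dispatch to these unless it overrides them.
 */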
ia64_vector __ia64_irq_to_vector(int irq)
{
	return irq_cfg[irq].vector;
}
unsigned int __ia64_local_vector_to_irq(ia64_vector vec)
{
	return __get_cpu_var(vector_irq)[vec];
}
#endif
/*
 * Interrupt statistics:
*/
atomic_t irq_err_count;
/*
 * /proc/interrupts printing:
*/
int arch_show_interrupts(struct seq_file *p, int prec)
{
	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	return 0;
}
#ifdef CONFIG_SMP
static char irq_redir[NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
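/*
 * Record the placement chosen for an IRQ: copy the target CPU's mask
 * into the irq_data affinity and remember the redirection hint in
 * irq_redir[].
 */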
void set_irq_affinity_info(unsigned int irq, int hwid, int redir)
{
	if (irq < NR_IRQS) {
		cpumask_copy(irq_get_irq_data(irq)->affinity,
			     cpumask_of(cpu_logical_id(hwid)));
		irq_redir[irq] = (char) (redir & 0xff);
	}
}
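/*
 * Validate a cpumask supplied for an IRQ's affinity (e.g. via
 * /proc/irq/<n>/smp_affinity): SN2 hardware can only target an
 * interrupt at a single CPU, so reject wider masks there.
 */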
bool is_affinity_mask_valid(const struct cpumask *cpumask)
{
	if (ia64_platform_is("sn2")) {
		/* Only allow one CPU to be specified in the smp_affinity mask */
		if (cpumask_weight(cpumask) != 1)
			return false;
	}
	return true;
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_HOTPLUG_CPU
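/*
 * IRQs whose affinity pointed only at the CPU going offline are flagged
 * here by migrate_irqs() (phase 1) and replayed in phase 3 of
 * fixup_irqs().
 */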
unsigned int vectors_in_migration[NR_IRQS];
/*
 * Since cpu_online_mask is already updated, we just need to find IRQs
 * whose affinity no longer intersects it.
 */
static void migrate_irqs(void)
{
	int irq, new_cpu;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		struct irq_chip *chip = irq_data_get_irq_chip(data);

		if (irqd_irq_disabled(data))
			continue;
		/*
		 * No handling for now.
		 * TBD: Implement a disable function so we can tell the
		 * CPU not to respond to these local intr sources,
		 * such as ITV, CPEI and MCA.
		 */
		if (irqd_is_per_cpu(data))
			continue;

		if (cpumask_any_and(data->affinity, cpu_online_mask)
		    >= nr_cpu_ids) {
			/*
			 * Save it for phase 2 processing
			 */
			vectors_in_migration[irq] = irq;

			new_cpu = cpumask_any(cpu_online_mask);
			/*
			 * All three are essential; currently WARN_ON, but
			 * maybe this should panic?
			 */
			if (chip && chip->irq_disable &&
			    chip->irq_enable && chip->irq_set_affinity) {
				chip->irq_disable(data);
				chip->irq_set_affinity(data,
						       cpumask_of(new_cpu),
						       false);
				chip->irq_enable(data);
			} else {
				WARN_ON(!chip || !chip->irq_disable ||
					!chip->irq_enable ||
					!chip->irq_set_affinity);
			}
		}
	}
}
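/*
 * Called on the CPU that is about to go offline: mask the local timer,
 * hand the time-keeper role to a surviving CPU, migrate IRQs away,
 * drain anything still pending, and finally shut interrupts off.
 */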
void fixup_irqs(void)
{
	unsigned int irq;
	extern void ia64_process_pending_intr(void);
	extern volatile int time_keeper_id;

	/* Mask ITV to disable timer */
	ia64_set_itv(1 << 16);
	/*
	 * Find a new timesync master
	 */
	if (smp_processor_id() == time_keeper_id) {
		time_keeper_id = cpumask_first(cpu_online_mask);
		printk("CPU %d is now promoted to time-keeper master\n",
		       time_keeper_id);
	}
	/*
	 * Phase 1: Locate IRQs bound to this cpu and
	 * relocate them for cpu removal.
	 */
	migrate_irqs();
	/*
	 * Phase 2: Perform interrupt processing for all entries reported in
	 * local APIC.
	 */
	ia64_process_pending_intr();
	/*
	 * Phase 3: Now handle any interrupts not captured in local APIC.
	 * This accounts for cases where a device interrupted while its
	 * RTE was being disabled and re-programmed.
	 */
	for (irq = 0; irq < NR_IRQS; irq++) {
		if (vectors_in_migration[irq]) {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			vectors_in_migration[irq] = 0;
			generic_handle_irq(irq);
			set_irq_regs(old_regs);
		}
	}
	/*
	 * Now let the processor die. We disable interrupts and call
	 * max_xtp() to ensure no more interrupts are routed to this
	 * processor. The local timer interrupt can still have one
	 * pending, which is taken care of in timer_interrupt().
	 */
	max_xtp();
	local_irq_disable();
}
#endif