/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
        alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
        if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
                return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
                free_cpumask_var(desc->irq_data.affinity);
                return -ENOMEM;
        }
#endif
        return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
        desc->irq_data.node = node;
        cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
        return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
        desc->irq_data.irq = irq;
        desc->irq_data.chip = &no_irq_chip;
        desc->irq_data.chip_data = NULL;
        desc->irq_data.handler_data = NULL;
        desc->irq_data.msi_desc = NULL;
        desc->status = IRQ_DEFAULT_INIT_FLAGS;
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
        desc->name = NULL;
        memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
        desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_RAW_SPINLOCK(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}

static void delete_irq_desc(unsigned int irq)
{
        radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
        free_cpumask_var(desc->pending_mask);
#endif
        /* Matches the allocation of irq_data.affinity in alloc_masks() */
        free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node)
{
        /* Temporary hack until we can switch to GFP_KERNEL */
        gfp_t gfp = gfp_allowed_mask == GFP_BOOT_MASK ? GFP_NOWAIT : GFP_ATOMIC;
        struct irq_desc *desc;

        desc = kzalloc_node(sizeof(*desc), gfp, node);
        if (!desc)
                return NULL;
        /* allocate based on nr_cpu_ids */
        desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs),
                                        gfp, node);
        if (!desc->kstat_irqs)
                goto err_desc;

        if (alloc_masks(desc, gfp, node))
                goto err_kstat;

        raw_spin_lock_init(&desc->lock);
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);

        desc_set_defaults(irq, desc, node);
        return desc;

err_kstat:
        kfree(desc->kstat_irqs);
err_desc:
        kfree(desc);
        return NULL;
}

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        unregister_irq_proc(irq, desc);

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);
        delete_irq_desc(irq);
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

        free_masks(desc);
        kfree(desc->kstat_irqs);
        kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
        struct irq_desc *desc;
        unsigned long flags;
        int i;

        for (i = 0; i < cnt; i++) {
                desc = alloc_desc(start + i, node);
                if (!desc)
                        goto err;
                raw_spin_lock_irqsave(&sparse_irq_lock, flags);
                irq_insert_desc(start + i, desc);
                raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
        }
        return start;

err:
        for (i--; i >= 0; i--)
                free_desc(start + i);

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);
        bitmap_clear(allocated_irqs, start, cnt);
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
        return -ENOMEM;
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
        int res = irq_alloc_descs(irq, irq, 1, node);

        if (res == -EEXIST || res == irq)
                return irq_to_desc(irq);
        return NULL;
}

int __init early_irq_init(void)
{
        int i, initcnt, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        /* Let arch update nr_irqs and return the nr of preallocated irqs */
        initcnt = arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

        for (i = 0; i < initcnt; i++) {
                desc = alloc_desc(i, node);
                set_bit(i, allocated_irqs);
                irq_insert_desc(i, desc);
        }
        return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status         = IRQ_DEFAULT_INIT_FLAGS,
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];

int __init early_irq_init(void)
{
        int count, i, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].irq_data.irq = i;
                desc[i].irq_data.chip = &no_irq_chip;
                desc[i].kstat_irqs = kstat_irqs_all[i];
                alloc_masks(desc + i, GFP_KERNEL, node);
                desc_smp_init(desc + i, node);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
        }
        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
        return irq_to_desc(irq);
}

static void free_desc(unsigned int irq)
{
        dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
        return start;
}

#endif /* !CONFIG_SPARSE_IRQ */

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:       Start of descriptor range
 * @cnt:        Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
        unsigned long flags;
        int i;

        if (from >= nr_irqs || (from + cnt) > nr_irqs)
                return;

        for (i = 0; i < cnt; i++)
                free_desc(from + i);

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);
        bitmap_clear(allocated_irqs, from, cnt);
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
}

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:        Allocate for specific irq number if irq >= 0
 * @from:       Start the search from this irq number
 * @cnt:        Number of consecutive irqs to allocate.
 * @node:       Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
        unsigned long flags;
        int start, ret;

        if (!cnt)
                return -EINVAL;

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);

        start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
        ret = -EEXIST;
        if (irq >= 0 && start != irq)
                goto err;

        ret = -ENOMEM;
        if (start >= nr_irqs)
                goto err;

        bitmap_set(allocated_irqs, start, cnt);
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
        return alloc_descs(start, cnt, node);

err:
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
        return ret;
}
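
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * a driver that needs four consecutive interrupt numbers could allocate
 * and later release them as below. my_setup_irq() is a hypothetical
 * helper, not a kernel API.
 *
 *      int i, base = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *      if (base < 0)
 *              return base;
 *      for (i = 0; i < 4; i++)
 *              my_setup_irq(base + i);
 *      ...
 *      irq_free_descs(base, 4);
 */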

/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:       mark from irq number
 * @cnt:        number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
        unsigned long flags;
        unsigned int start;
        int ret = 0;

        if (!cnt || (from + cnt) > nr_irqs)
                return -EINVAL;

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);
        start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
        if (start == from)
                bitmap_set(allocated_irqs, start, cnt);
        else
                ret = -EEXIST;
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
        return ret;
}
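
/*
 * A minimal sketch, assuming arch setup code wants the 16 legacy ISA
 * interrupt numbers kept out of the dynamic allocator:
 *
 *      if (irq_reserve_irqs(0, 16))
 *              printk(KERN_WARNING "legacy irqs already in use\n");
 */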

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:     where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
        return find_next_bit(allocated_irqs, nr_irqs, offset);
}
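
/*
 * A minimal sketch of walking every allocated interrupt number with the
 * helper above; do_something() stands in for arbitrary per-irq work:
 *
 *      unsigned int irq;
 *
 *      for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *           irq = irq_get_next_irq(irq + 1))
 *              do_something(irq);
 */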

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:        irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc_set_defaults(irq, desc, desc_node(desc));
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}
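
/*
 * A minimal teardown sketch, assuming the caller is finished with a
 * dynamically allocated irq: reset the descriptor to its defaults
 * (handle_bad_irq, no_irq_chip, depth 1), then return the number to
 * the allocator:
 *
 *      dynamic_irq_cleanup(irq);
 *      irq_free_descs(irq, 1);
 */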

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc ? desc->kstat_irqs[cpu] : 0;
}
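
/*
 * A minimal sketch: the total count for one interrupt is the sum of the
 * per-cpu counters read above:
 *
 *      unsigned int cpu, sum = 0;
 *
 *      for_each_possible_cpu(cpu)
 *              sum += kstat_irqs_cpu(irq, cpu);
 */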