/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;

	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;
	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
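
/*
 * Example (illustrative sketch, not part of the kernel sources): a
 * cascaded interrupt controller's flow handler can demultiplex its
 * children by handing each child's Linux irq number to
 * generic_handle_irq(). demo_read_pending() and demo_child_irq_base
 * are hypothetical names used only for illustration.
 *
 *	static void demo_demux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		unsigned long pending = demo_read_pending();
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, 32)
 *			generic_handle_irq(demo_child_irq_base + bit);
 *	}
 */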

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(irq_alloc_descs);
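
/*
 * Example (illustrative sketch, not part of the kernel sources): a
 * driver that needs a block of interrupt numbers can pair
 * irq_alloc_descs() with irq_free_descs(). The count of 8 and the
 * search start of 0 are arbitrary illustration values; on failure
 * nothing is allocated and the error code is returned, on success the
 * whole range is released again with irq_free_descs().
 *
 *	int base = irq_alloc_descs(-1, 0, 8, numa_node_id());
 *
 *	if (base < 0)
 *		return base;
 *	...
 *	irq_free_descs(base, 8);
 */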

/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
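
/*
 * Example (illustrative sketch, not part of the kernel sources):
 * platform code with a fixed legacy interrupt layout can reserve that
 * range early so later dynamic allocations never hand those numbers
 * out. The range 0..15 is an arbitrary illustration value.
 *
 *	if (irq_reserve_irqs(0, 16))
 *		pr_warn("legacy irq range already in use\n");
 */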

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
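
/*
 * Example (illustrative sketch, not part of the kernel sources):
 * walking every currently allocated irq number, e.g. for diagnostics:
 *
 *	unsigned int irq;
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		pr_info("irq %u is allocated\n", irq);
 */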

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
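
/*
 * Example (illustrative sketch, not part of the kernel sources): core
 * code that needs the descriptor locked, optionally with the slow bus
 * lock of the underlying irq chip held as well, brackets its critical
 * section with the pair above:
 *
 *	unsigned long flags;
 *	struct irq_desc *desc = __irq_get_desc_lock(irq, &flags, true);
 *
 *	if (!desc)
 *		return -EINVAL;
 *	...
 *	__irq_put_desc_unlock(desc, flags, true);
 */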

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}
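
/*
 * Example (illustrative sketch, not part of the kernel sources):
 * comparing the total count for one interrupt line with a single
 * CPU's contribution, e.g. when emitting statistics:
 *
 *	unsigned int total = kstat_irqs(irq);
 *	unsigned int on_cpu0 = kstat_irqs_cpu(irq, 0);
 */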