2018-03-15 00:15:19 +03:00
// SPDX-License-Identifier: GPL-2.0
2010-09-22 19:09:43 +04:00
/*
* Copyright ( C ) 1992 , 1998 - 2006 Linus Torvalds , Ingo Molnar
* Copyright ( C ) 2005 - 2006 , Thomas Gleixner , Russell King
*
2018-03-15 00:15:16 +03:00
* This file contains the interrupt descriptor management code . Detailed
* information is available in Documentation / core - api / genericirq . rst
2010-09-22 19:09:43 +04:00
*
*/
# include <linux/irq.h>
# include <linux/slab.h>
2011-09-20 04:33:19 +04:00
# include <linux/export.h>
2010-09-22 19:09:43 +04:00
# include <linux/interrupt.h>
# include <linux/kernel_stat.h>
2023-05-19 16:49:02 +03:00
# include <linux/maple_tree.h>
genirq: Add irq_domain-aware core IRQ handler
Calling irq_find_mapping from outside a irq_{enter,exit} section is
unsafe and produces ugly messages if CONFIG_PROVE_RCU is enabled:
If coming from the idle state, the rcu_read_lock call in irq_find_mapping
will generate an unpleasant warning:
<quote>
===============================
[ INFO: suspicious RCU usage. ]
3.16.0-rc1+ #135 Not tainted
-------------------------------
include/linux/rcupdate.h:871 rcu_read_lock() used illegally while idle!
other info that might help us debug this:
RCU used illegally from idle CPU!
rcu_scheduler_active = 1, debug_locks = 0
RCU used illegally from extended quiescent state!
1 lock held by swapper/0/0:
#0: (rcu_read_lock){......}, at: [<ffffffc00010206c>]
irq_find_mapping+0x4c/0x198
</quote>
As this issue is fairly widespread and involves at least three
different architectures, a possible solution is to add a new
handle_domain_irq entry point into the generic IRQ code that
the interrupt controller code can call.
This new function takes an irq_domain, and calls into irq_find_domain
inside the irq_{enter,exit} block. An additional "lookup" parameter is
used to allow non-domain architecture code to be replaced by this as well.
Interrupt controllers can then be updated to use the new mechanism.
This code is sitting behind a new CONFIG_HANDLE_DOMAIN_IRQ, as not all
architectures implement set_irq_regs (yes, mn10300, I'm looking at you...).
Reported-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Link: https://lkml.kernel.org/r/1409047421-27649-2-git-send-email-marc.zyngier@arm.com
Signed-off-by: Jason Cooper <jason@lakedaemon.net>
2014-08-26 14:03:16 +04:00
# include <linux/irqdomain.h>
2016-09-13 19:14:51 +03:00
# include <linux/sysfs.h>
2010-09-22 19:09:43 +04:00
# include "internals.h"
/*
* lockdep : we want to handle all irq_desc locks as a single lock - class :
*/
2010-09-29 19:18:47 +04:00
static struct lock_class_key irq_desc_lock_class ;
2010-09-22 19:09:43 +04:00
2011-05-18 14:53:03 +04:00
# if defined(CONFIG_SMP)
2016-02-03 21:52:23 +03:00
/*
 * Parse the "irqaffinity=" kernel command line option into the default
 * interrupt affinity mask. Runs before the slab allocator is available,
 * hence the bootmem cpumask allocation.
 */
static int __init irq_affinity_setup(char *str)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Always include the boot CPU so that a bogus command line mask
	 * cannot leave the default affinity empty.
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
2010-09-22 19:09:43 +04:00
static void __init init_irq_default_affinity(void)
{
	/* Not set up via "irqaffinity=" on the command line? Allocate now. */
	if (!cpumask_available(irq_default_affinity))
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	/* An empty default mask would make interrupts unroutable. */
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
# else
/* !CONFIG_SMP: there is no affinity mask to set up. */
static void __init init_irq_default_affinity(void)
{
}
# endif
2010-09-27 19:48:26 +04:00
# ifdef CONFIG_SMP
2017-06-20 02:37:36 +03:00
static int alloc_masks ( struct irq_desc * desc , int node )
2010-09-27 19:48:26 +04:00
{
2015-06-03 06:47:50 +03:00
if ( ! zalloc_cpumask_var_node ( & desc - > irq_common_data . affinity ,
2017-06-20 02:37:36 +03:00
GFP_KERNEL , node ) )
2010-09-27 19:48:26 +04:00
return - ENOMEM ;
2017-06-20 02:37:38 +03:00
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
if ( ! zalloc_cpumask_var_node ( & desc - > irq_common_data . effective_affinity ,
GFP_KERNEL , node ) ) {
free_cpumask_var ( desc - > irq_common_data . affinity ) ;
return - ENOMEM ;
}
# endif
2010-09-27 19:48:26 +04:00
# ifdef CONFIG_GENERIC_PENDING_IRQ
2017-06-20 02:37:36 +03:00
if ( ! zalloc_cpumask_var_node ( & desc - > pending_mask , GFP_KERNEL , node ) ) {
2017-06-20 02:37:38 +03:00
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
free_cpumask_var ( desc - > irq_common_data . effective_affinity ) ;
# endif
2015-06-03 06:47:50 +03:00
free_cpumask_var ( desc - > irq_common_data . affinity ) ;
2010-09-27 19:48:26 +04:00
return - ENOMEM ;
}
# endif
return 0 ;
}
2016-07-04 11:39:25 +03:00
static void desc_smp_init ( struct irq_desc * desc , int node ,
const struct cpumask * affinity )
2010-09-27 19:48:26 +04:00
{
2016-07-04 11:39:25 +03:00
if ( ! affinity )
affinity = irq_default_affinity ;
cpumask_copy ( desc - > irq_common_data . affinity , affinity ) ;
2010-09-29 20:46:55 +04:00
# ifdef CONFIG_GENERIC_PENDING_IRQ
cpumask_clear ( desc - > pending_mask ) ;
# endif
2015-06-01 11:05:16 +03:00
# ifdef CONFIG_NUMA
desc - > irq_common_data . node = node ;
# endif
2010-09-29 20:46:55 +04:00
}
2010-09-27 19:48:26 +04:00
# else
/* !CONFIG_SMP stubs: no masks to allocate or initialize. */
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
2010-09-27 19:48:26 +04:00
# endif
2011-07-11 14:17:31 +04:00
static void desc_set_defaults ( unsigned int irq , struct irq_desc * desc , int node ,
2016-07-04 11:39:25 +03:00
const struct cpumask * affinity , struct module * owner )
2010-09-27 19:48:26 +04:00
{
2011-01-14 02:45:38 +03:00
int cpu ;
2015-06-01 11:05:21 +03:00
desc - > irq_common_data . handler_data = NULL ;
2015-06-01 11:05:43 +03:00
desc - > irq_common_data . msi_desc = NULL ;
2015-06-01 11:05:21 +03:00
2015-06-01 11:05:12 +03:00
desc - > irq_data . common = & desc - > irq_common_data ;
2010-09-27 19:48:26 +04:00
desc - > irq_data . irq = irq ;
desc - > irq_data . chip = & no_irq_chip ;
desc - > irq_data . chip_data = NULL ;
2011-02-09 16:54:49 +03:00
irq_settings_clr_and_set ( desc , ~ 0 , _IRQ_DEFAULT_INIT_FLAGS ) ;
2011-03-27 13:02:49 +04:00
irqd_set ( & desc - > irq_data , IRQD_IRQ_DISABLED ) ;
2017-06-26 14:33:33 +03:00
irqd_set ( & desc - > irq_data , IRQD_IRQ_MASKED ) ;
2010-09-27 19:48:26 +04:00
desc - > handle_irq = handle_bad_irq ;
desc - > depth = 1 ;
2010-09-29 20:46:55 +04:00
desc - > irq_count = 0 ;
desc - > irqs_unhandled = 0 ;
2019-02-08 16:48:03 +03:00
desc - > tot_count = 0 ;
2010-09-27 19:48:26 +04:00
desc - > name = NULL ;
2011-07-11 14:17:31 +04:00
desc - > owner = owner ;
2011-01-14 02:45:38 +03:00
for_each_possible_cpu ( cpu )
* per_cpu_ptr ( desc - > kstat_irqs , cpu ) = 0 ;
2016-07-04 11:39:25 +03:00
desc_smp_init ( desc , node , affinity ) ;
2010-09-27 19:48:26 +04:00
}
2010-09-22 19:09:43 +04:00
int nr_irqs = NR_IRQS ;
EXPORT_SYMBOL_GPL ( nr_irqs ) ;
2010-10-08 14:47:53 +04:00
static DEFINE_MUTEX ( sparse_irq_lock ) ;
2023-05-19 16:49:02 +03:00
static struct maple_tree sparse_irqs = MTREE_INIT_EXT ( sparse_irqs ,
MT_FLAGS_ALLOC_RANGE |
MT_FLAGS_LOCK_EXTERN |
MT_FLAGS_USE_RCU ,
sparse_irq_lock ) ;
2023-05-19 16:49:01 +03:00
static int irq_find_free_area ( unsigned int from , unsigned int cnt )
{
2023-05-19 16:49:02 +03:00
MA_STATE ( mas , & sparse_irqs , 0 , 0 ) ;
if ( mas_empty_area ( & mas , from , MAX_SPARSE_IRQS , cnt ) )
return - ENOSPC ;
return mas . index ;
2023-05-19 16:49:01 +03:00
}
/*
 * Return the first allocated irq number >= @offset, or nr_irqs when
 * no further descriptor exists.
 */
static unsigned int irq_find_at_or_after(unsigned int offset)
{
	unsigned long index = offset;
	struct irq_desc *desc = mt_find(&sparse_irqs, &index, nr_irqs);

	return desc ? irq_desc_get_irq(desc) : nr_irqs;
}
/* Store @desc under @irq in the sparse tree. Allocation failure is a bug. */
static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	MA_STATE(mas, &sparse_irqs, irq, irq);

	WARN_ON(mas_store_gfp(&mas, desc, GFP_KERNEL) != 0);
}
/* Remove the descriptor entry for @irq from the sparse tree. */
static void delete_irq_desc(unsigned int irq)
{
	MA_STATE(mas, &sparse_irqs, irq, irq);

	mas_erase(&mas);
}
2010-09-27 19:48:26 +04:00
2010-09-22 19:09:43 +04:00
# ifdef CONFIG_SPARSE_IRQ
2016-09-13 19:14:51 +03:00
static void irq_kobj_release ( struct kobject * kobj ) ;
# ifdef CONFIG_SYSFS
static struct kobject * irq_kobj_base ;
# define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name # # _attr = __ATTR_RO ( _name )
static ssize_t per_cpu_count_show ( struct kobject * kobj ,
struct kobj_attribute * attr , char * buf )
{
struct irq_desc * desc = container_of ( kobj , struct irq_desc , kobj ) ;
ssize_t ret = 0 ;
char * p = " " ;
2020-12-10 22:25:44 +03:00
int cpu ;
2016-09-13 19:14:51 +03:00
for_each_possible_cpu ( cpu ) {
2020-12-10 22:25:44 +03:00
unsigned int c = irq_desc_kstat_cpu ( desc , cpu ) ;
2016-09-13 19:14:51 +03:00
ret + = scnprintf ( buf + ret , PAGE_SIZE - ret , " %s%u " , p , c ) ;
p = " , " ;
}
ret + = scnprintf ( buf + ret , PAGE_SIZE - ret , " \n " ) ;
return ret ;
}
IRQ_ATTR_RO ( per_cpu_count ) ;
static ssize_t chip_name_show ( struct kobject * kobj ,
struct kobj_attribute * attr , char * buf )
{
struct irq_desc * desc = container_of ( kobj , struct irq_desc , kobj ) ;
ssize_t ret = 0 ;
raw_spin_lock_irq ( & desc - > lock ) ;
if ( desc - > irq_data . chip & & desc - > irq_data . chip - > name ) {
ret = scnprintf ( buf , PAGE_SIZE , " %s \n " ,
desc - > irq_data . chip - > name ) ;
}
raw_spin_unlock_irq ( & desc - > lock ) ;
return ret ;
}
IRQ_ATTR_RO ( chip_name ) ;
static ssize_t hwirq_show ( struct kobject * kobj ,
struct kobj_attribute * attr , char * buf )
{
struct irq_desc * desc = container_of ( kobj , struct irq_desc , kobj ) ;
ssize_t ret = 0 ;
raw_spin_lock_irq ( & desc - > lock ) ;
if ( desc - > irq_data . domain )
2021-07-01 16:27:50 +03:00
ret = sprintf ( buf , " %lu \n " , desc - > irq_data . hwirq ) ;
2016-09-13 19:14:51 +03:00
raw_spin_unlock_irq ( & desc - > lock ) ;
return ret ;
}
IRQ_ATTR_RO ( hwirq ) ;
static ssize_t type_show ( struct kobject * kobj ,
struct kobj_attribute * attr , char * buf )
{
struct irq_desc * desc = container_of ( kobj , struct irq_desc , kobj ) ;
ssize_t ret = 0 ;
raw_spin_lock_irq ( & desc - > lock ) ;
ret = sprintf ( buf , " %s \n " ,
irqd_is_level_type ( & desc - > irq_data ) ? " level " : " edge " ) ;
raw_spin_unlock_irq ( & desc - > lock ) ;
return ret ;
}
IRQ_ATTR_RO ( type ) ;
2018-02-26 18:50:43 +03:00
static ssize_t wakeup_show ( struct kobject * kobj ,
struct kobj_attribute * attr , char * buf )
{
struct irq_desc * desc = container_of ( kobj , struct irq_desc , kobj ) ;
ssize_t ret = 0 ;
raw_spin_lock_irq ( & desc - > lock ) ;
ret = sprintf ( buf , " %s \n " ,
irqd_is_wakeup_set ( & desc - > irq_data ) ? " enabled " : " disabled " ) ;
raw_spin_unlock_irq ( & desc - > lock ) ;
return ret ;
}
IRQ_ATTR_RO ( wakeup ) ;
2016-09-13 19:14:51 +03:00
static ssize_t name_show ( struct kobject * kobj ,
struct kobj_attribute * attr , char * buf )
{
struct irq_desc * desc = container_of ( kobj , struct irq_desc , kobj ) ;
ssize_t ret = 0 ;
raw_spin_lock_irq ( & desc - > lock ) ;
if ( desc - > name )
ret = scnprintf ( buf , PAGE_SIZE , " %s \n " , desc - > name ) ;
raw_spin_unlock_irq ( & desc - > lock ) ;
return ret ;
}
IRQ_ATTR_RO ( name ) ;
static ssize_t actions_show ( struct kobject * kobj ,
struct kobj_attribute * attr , char * buf )
{
struct irq_desc * desc = container_of ( kobj , struct irq_desc , kobj ) ;
struct irqaction * action ;
ssize_t ret = 0 ;
char * p = " " ;
raw_spin_lock_irq ( & desc - > lock ) ;
2022-07-10 14:26:14 +03:00
for_each_action_of_desc ( desc , action ) {
2016-09-13 19:14:51 +03:00
ret + = scnprintf ( buf + ret , PAGE_SIZE - ret , " %s%s " ,
p , action - > name ) ;
p = " , " ;
}
raw_spin_unlock_irq ( & desc - > lock ) ;
if ( ret )
ret + = scnprintf ( buf + ret , PAGE_SIZE - ret , " \n " ) ;
return ret ;
}
IRQ_ATTR_RO ( actions ) ;
static struct attribute * irq_attrs [ ] = {
& per_cpu_count_attr . attr ,
& chip_name_attr . attr ,
& hwirq_attr . attr ,
& type_attr . attr ,
2018-02-26 18:50:43 +03:00
& wakeup_attr . attr ,
2016-09-13 19:14:51 +03:00
& name_attr . attr ,
& actions_attr . attr ,
NULL
} ;
2019-04-02 05:51:41 +03:00
ATTRIBUTE_GROUPS ( irq ) ;
2016-09-13 19:14:51 +03:00
2023-02-17 06:16:25 +03:00
static const struct kobj_type irq_kobj_type = {
2016-09-13 19:14:51 +03:00
. release = irq_kobj_release ,
. sysfs_ops = & kobj_sysfs_ops ,
2019-04-02 05:51:41 +03:00
. default_groups = irq_groups ,
2016-09-13 19:14:51 +03:00
} ;
static void irq_sysfs_add ( int irq , struct irq_desc * desc )
{
if ( irq_kobj_base ) {
/*
* Continue even in case of failure as this is nothing
2022-11-28 18:16:12 +03:00
* crucial and failures in the late irq_sysfs_init ( )
* cannot be rolled back .
2016-09-13 19:14:51 +03:00
*/
if ( kobject_add ( & desc - > kobj , irq_kobj_base , " %d " , irq ) )
pr_warn ( " Failed to add kobject for irq %d \n " , irq ) ;
2022-11-28 18:16:12 +03:00
else
desc - > istate | = IRQS_SYSFS ;
2016-09-13 19:14:51 +03:00
}
}
2019-08-02 02:53:53 +03:00
static void irq_sysfs_del ( struct irq_desc * desc )
{
/*
2022-11-28 18:16:12 +03:00
* Only invoke kobject_del ( ) when kobject_add ( ) was successfully
* invoked for the descriptor . This covers both early boot , where
* sysfs is not initialized yet , and the case of a failed
* kobject_add ( ) invocation .
2019-08-02 02:53:53 +03:00
*/
2022-11-28 18:16:12 +03:00
if ( desc - > istate & IRQS_SYSFS )
2019-08-02 02:53:53 +03:00
kobject_del ( & desc - > kobj ) ;
}
2016-09-13 19:14:51 +03:00
/*
 * Create /sys/kernel/irq and register all descriptors which were
 * allocated before sysfs became available.
 */
static int __init irq_sysfs_init(void)
{
	struct irq_desc *desc;
	int irq;

	/* Prevent concurrent irq alloc/free */
	irq_lock_sparse();

	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
	if (!irq_kobj_base) {
		irq_unlock_sparse();
		return -ENOMEM;
	}

	/* Add the already allocated interrupts */
	for_each_irq_desc(irq, desc)
		irq_sysfs_add(irq, desc);
	irq_unlock_sparse();

	return 0;
}
postcore_initcall(irq_sysfs_init);
# else /* !CONFIG_SYSFS */
2023-02-17 06:16:25 +03:00
static const struct kobj_type irq_kobj_type = {
2016-09-13 19:14:51 +03:00
. release = irq_kobj_release ,
} ;
static void irq_sysfs_add ( int irq , struct irq_desc * desc ) { }
2019-08-02 02:53:53 +03:00
static void irq_sysfs_del ( struct irq_desc * desc ) { }
2016-09-13 19:14:51 +03:00
# endif /* CONFIG_SYSFS */
2010-09-22 19:09:43 +04:00
struct irq_desc * irq_to_desc ( unsigned int irq )
{
2023-05-19 16:49:02 +03:00
return mtree_load ( & sparse_irqs , irq ) ;
2010-09-22 19:09:43 +04:00
}
2020-12-25 14:30:58 +03:00
# ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE
2020-12-10 22:26:06 +03:00
EXPORT_SYMBOL_GPL ( irq_to_desc ) ;
# endif
2010-09-22 19:09:43 +04:00
2010-09-27 19:48:26 +04:00
# ifdef CONFIG_SMP
static void free_masks ( struct irq_desc * desc )
{
# ifdef CONFIG_GENERIC_PENDING_IRQ
free_cpumask_var ( desc - > pending_mask ) ;
# endif
2015-06-03 06:47:50 +03:00
free_cpumask_var ( desc - > irq_common_data . affinity ) ;
2017-06-20 02:37:38 +03:00
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
free_cpumask_var ( desc - > irq_common_data . effective_affinity ) ;
# endif
2010-09-27 19:48:26 +04:00
}
# else
static inline void free_masks ( struct irq_desc * desc ) { }
# endif
2014-12-12 01:01:41 +03:00
/* Serialize descriptor allocation/free against /proc and sysfs readers. */
void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}
2016-07-04 11:39:25 +03:00
static struct irq_desc * alloc_desc ( int irq , int node , unsigned int flags ,
const struct cpumask * affinity ,
struct module * owner )
2010-09-27 19:48:26 +04:00
{
struct irq_desc * desc ;
2017-06-20 02:37:36 +03:00
desc = kzalloc_node ( sizeof ( * desc ) , GFP_KERNEL , node ) ;
2010-09-27 19:48:26 +04:00
if ( ! desc )
return NULL ;
/* allocate based on nr_cpu_ids */
2011-01-14 02:45:38 +03:00
desc - > kstat_irqs = alloc_percpu ( unsigned int ) ;
2010-09-27 19:48:26 +04:00
if ( ! desc - > kstat_irqs )
goto err_desc ;
2017-06-20 02:37:36 +03:00
if ( alloc_masks ( desc , node ) )
2010-09-27 19:48:26 +04:00
goto err_kstat ;
raw_spin_lock_init ( & desc - > lock ) ;
lockdep_set_class ( & desc - > lock , & irq_desc_lock_class ) ;
2017-06-30 00:33:37 +03:00
mutex_init ( & desc - > request_mutex ) ;
2015-12-13 20:02:22 +03:00
init_rcu_head ( & desc - > rcu ) ;
genirq: Synchronize interrupt thread startup
A kernel hang can be observed when running setserial in a loop on a kernel
with force threaded interrupts. The sequence of events is:
setserial
open("/dev/ttyXXX")
request_irq()
do_stuff()
-> serial interrupt
-> wake(irq_thread)
desc->threads_active++;
close()
free_irq()
kthread_stop(irq_thread)
synchronize_irq() <- hangs because desc->threads_active != 0
The thread is created in request_irq() and woken up, but does not get on a
CPU to reach the actual thread function, which would handle the pending
wake-up. kthread_stop() sets the should stop condition which makes the
thread immediately exit, which in turn leaves the stale threads_active
count around.
This problem was introduced with commit 519cc8652b3a, which addressed a
interrupt sharing issue in the PCIe code.
Before that commit free_irq() invoked synchronize_irq(), which waits for
the hard interrupt handler and also for associated threads to complete.
To address the PCIe issue synchronize_irq() was replaced with
__synchronize_hardirq(), which only waits for the hard interrupt handler to
complete, but not for threaded handlers.
This was done under the assumption, that the interrupt thread already
reached the thread function and waits for a wake-up, which is guaranteed to
be handled before acting on the stop condition. The problematic case, that
the thread would not reach the thread function, was obviously overlooked.
Make sure that the interrupt thread is really started and reaches
thread_fn() before returning from __setup_irq().
This utilizes the existing wait queue in the interrupt descriptor. The
wait queue is unused for non-shared interrupts. For shared interrupts the
usage might cause a spurious wake-up of a waiter in synchronize_irq() or the
completion of a threaded handler might cause a spurious wake-up of the
waiter for the ready flag. Both are harmless and have no functional impact.
[ tglx: Amended changelog ]
Fixes: 519cc8652b3a ("genirq: Synchronize only with single thread on free_irq()")
Signed-off-by: Thomas Pfaff <tpfaff@pcs.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/552fe7b4-9224-b183-bb87-a8f36d335690@pcs.com
2022-05-02 14:28:29 +03:00
init_waitqueue_head ( & desc - > wait_for_threads ) ;
2010-09-27 19:48:26 +04:00
2016-07-04 11:39:25 +03:00
desc_set_defaults ( irq , desc , node , affinity , owner ) ;
irqd_set ( & desc - > irq_data , flags ) ;
2016-09-13 19:14:51 +03:00
kobject_init ( & desc - > kobj , & irq_kobj_type ) ;
2023-05-19 16:49:00 +03:00
irq_resend_init ( desc ) ;
2010-09-27 19:48:26 +04:00
return desc ;
err_kstat :
2011-01-14 02:45:38 +03:00
free_percpu ( desc - > kstat_irqs ) ;
2010-09-27 19:48:26 +04:00
err_desc :
kfree ( desc ) ;
return NULL ;
}
2016-09-13 19:14:51 +03:00
static void irq_kobj_release ( struct kobject * kobj )
2015-12-13 20:02:22 +03:00
{
2016-09-13 19:14:51 +03:00
struct irq_desc * desc = container_of ( kobj , struct irq_desc , kobj ) ;
2015-12-13 20:02:22 +03:00
free_masks ( desc ) ;
free_percpu ( desc - > kstat_irqs ) ;
kfree ( desc ) ;
}
2016-09-13 19:14:51 +03:00
static void delayed_free_desc ( struct rcu_head * rhp )
{
struct irq_desc * desc = container_of ( rhp , struct irq_desc , rcu ) ;
kobject_put ( & desc - > kobj ) ;
}
2010-09-27 19:48:26 +04:00
static void free_desc ( unsigned int irq )
{
struct irq_desc * desc = irq_to_desc ( irq ) ;
2017-06-20 02:37:17 +03:00
irq_remove_debugfs_entry ( desc ) ;
2010-09-30 04:46:07 +04:00
unregister_irq_proc ( irq , desc ) ;
2014-12-12 01:01:41 +03:00
/*
* sparse_irq_lock protects also show_interrupts ( ) and
* kstat_irq_usr ( ) . Once we deleted the descriptor from the
* sparse tree we can free it . Access in proc will fail to
* lookup the descriptor .
2016-09-13 19:14:51 +03:00
*
* The sysfs entry must be serialized against a concurrent
* irq_sysfs_init ( ) as well .
2014-12-12 01:01:41 +03:00
*/
2019-08-02 02:53:53 +03:00
irq_sysfs_del ( desc ) ;
2010-09-27 19:48:26 +04:00
delete_irq_desc ( irq ) ;
2015-12-13 20:02:22 +03:00
/*
* We free the descriptor , masks and stat fields via RCU . That
* allows demultiplex interrupts to do rcu based management of
* the child interrupts .
2018-06-18 15:56:12 +03:00
* This also allows us to use rcu in kstat_irqs_usr ( ) .
2015-12-13 20:02:22 +03:00
*/
call_rcu ( & desc - > rcu , delayed_free_desc ) ;
2010-09-27 19:48:26 +04:00
}
2011-07-11 14:17:31 +04:00
static int alloc_descs ( unsigned int start , unsigned int cnt , int node ,
2018-12-04 18:51:20 +03:00
const struct irq_affinity_desc * affinity ,
struct module * owner )
2010-09-27 19:48:26 +04:00
{
struct irq_desc * desc ;
2016-09-14 17:18:49 +03:00
int i ;
2016-07-04 11:39:25 +03:00
2016-09-14 17:18:49 +03:00
/* Validate affinity mask(s) */
if ( affinity ) {
2019-01-17 06:00:09 +03:00
for ( i = 0 ; i < cnt ; i + + ) {
2018-12-04 18:51:20 +03:00
if ( cpumask_empty ( & affinity [ i ] . mask ) )
2016-09-14 17:18:49 +03:00
return - EINVAL ;
}
}
2016-07-04 11:39:25 +03:00
2010-09-27 19:48:26 +04:00
for ( i = 0 ; i < cnt ; i + + ) {
2018-12-04 18:51:20 +03:00
const struct cpumask * mask = NULL ;
2018-12-04 18:51:21 +03:00
unsigned int flags = 0 ;
2018-12-04 18:51:20 +03:00
2016-07-04 11:39:25 +03:00
if ( affinity ) {
2018-12-04 18:51:21 +03:00
if ( affinity - > is_managed ) {
flags = IRQD_AFFINITY_MANAGED |
IRQD_MANAGED_SHUTDOWN ;
}
2018-12-04 18:51:20 +03:00
mask = & affinity - > mask ;
2018-12-04 18:51:21 +03:00
node = cpu_to_node ( cpumask_first ( mask ) ) ;
2016-09-14 17:18:49 +03:00
affinity + + ;
2016-07-04 11:39:25 +03:00
}
2018-12-04 18:51:21 +03:00
2016-07-04 11:39:25 +03:00
desc = alloc_desc ( start + i , node , flags , mask , owner ) ;
2010-09-27 19:48:26 +04:00
if ( ! desc )
goto err ;
irq_insert_desc ( start + i , desc ) ;
2016-09-13 19:14:51 +03:00
irq_sysfs_add ( start + i , desc ) ;
2017-09-14 00:29:04 +03:00
irq_add_debugfs_entry ( start + i , desc ) ;
2010-09-27 19:48:26 +04:00
}
return start ;
err :
for ( i - - ; i > = 0 ; i - - )
free_desc ( start + i ) ;
return - ENOMEM ;
}
2011-02-19 22:07:37 +03:00
static int irq_expand_nr_irqs ( unsigned int nr )
2011-02-16 19:12:57 +03:00
{
2023-05-19 16:49:01 +03:00
if ( nr > MAX_SPARSE_IRQS )
2011-02-16 19:12:57 +03:00
return - ENOMEM ;
2011-02-19 22:07:37 +03:00
nr_irqs = nr ;
2011-02-16 19:12:57 +03:00
return 0 ;
}
2010-09-22 19:09:43 +04:00
int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
	       NR_IRQS, nr_irqs, initcnt);

	/* Clamp both values to what the sparse allocator can handle. */
	if (WARN_ON(nr_irqs > MAX_SPARSE_IRQS))
		nr_irqs = MAX_SPARSE_IRQS;

	if (WARN_ON(initcnt > MAX_SPARSE_IRQS))
		initcnt = MAX_SPARSE_IRQS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, 0, NULL, NULL);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}
# else /* !CONFIG_SPARSE_IRQ */
/* !CONFIG_SPARSE_IRQ: statically sized descriptor array. */
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};
int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		mutex_init(&desc[i].request_mutex);
		/* See __setup_irq(): synchronizes interrupt thread startup. */
		init_waitqueue_head(&desc[i].wait_for_threads);
		desc_set_defaults(i, &desc[i], node, NULL, NULL);
		/*
		 * Initialize the resend state of each descriptor, not just
		 * desc[0]. The previous irq_resend_init(desc) reinitialized
		 * the first array entry on every iteration and left the
		 * others untouched (only harmless because static storage is
		 * zero initialized). This also matches the sparse variant,
		 * where alloc_desc() does this per descriptor.
		 */
		irq_resend_init(&desc[i]);
	}
	return arch_early_irq_init();
}
/* !CONFIG_SPARSE_IRQ: simple bounds-checked array lookup. */
struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);
2010-09-22 19:09:43 +04:00
2010-09-27 19:48:26 +04:00
static void free_desc ( unsigned int irq )
{
2014-05-07 19:44:23 +04:00
struct irq_desc * desc = irq_to_desc ( irq ) ;
unsigned long flags ;
raw_spin_lock_irqsave ( & desc - > lock , flags ) ;
2016-07-04 11:39:25 +03:00
desc_set_defaults ( irq , desc , irq_desc_get_node ( desc ) , NULL , NULL ) ;
2014-05-07 19:44:23 +04:00
raw_spin_unlock_irqrestore ( & desc - > lock , flags ) ;
2023-05-19 16:49:02 +03:00
delete_irq_desc ( irq ) ;
2010-09-27 19:48:26 +04:00
}
2011-07-11 14:17:31 +04:00
static inline int alloc_descs ( unsigned int start , unsigned int cnt , int node ,
2018-12-04 18:51:20 +03:00
const struct irq_affinity_desc * affinity ,
2011-07-11 14:17:31 +04:00
struct module * owner )
2010-09-27 19:48:26 +04:00
{
2011-07-11 14:17:31 +04:00
u32 i ;
for ( i = 0 ; i < cnt ; i + + ) {
struct irq_desc * desc = irq_to_desc ( start + i ) ;
desc - > owner = owner ;
2023-05-19 16:49:02 +03:00
irq_insert_desc ( start + i , desc ) ;
2011-07-11 14:17:31 +04:00
}
2010-09-27 19:48:26 +04:00
return start ;
}
2011-02-16 19:12:57 +03:00
2011-02-19 22:07:37 +03:00
static int irq_expand_nr_irqs ( unsigned int nr )
2011-02-16 19:12:57 +03:00
{
return - ENOMEM ;
}
2014-05-07 19:44:21 +04:00
void irq_mark_irq ( unsigned int irq )
{
mutex_lock ( & sparse_irq_lock ) ;
2023-05-19 16:49:02 +03:00
irq_insert_desc ( irq , irq_desc + irq ) ;
2014-05-07 19:44:21 +04:00
mutex_unlock ( & sparse_irq_lock ) ;
}
2014-05-07 19:44:22 +04:00
# ifdef CONFIG_GENERIC_IRQ_LEGACY
/* Reset a legacy irq descriptor to its default state. */
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
# endif
2010-09-22 19:09:43 +04:00
# endif /* !CONFIG_SPARSE_IRQ */
2021-05-04 16:24:37 +03:00
int handle_irq_desc ( struct irq_desc * desc )
2011-05-18 14:48:00 +04:00
{
2020-03-06 16:03:43 +03:00
struct irq_data * data ;
2011-05-18 14:48:00 +04:00
if ( ! desc )
return - EINVAL ;
2020-03-06 16:03:43 +03:00
data = irq_desc_get_irq_data ( desc ) ;
2022-01-28 14:07:27 +03:00
if ( WARN_ON_ONCE ( ! in_hardirq ( ) & & handle_enforce_irqctx ( data ) ) )
2020-03-06 16:03:43 +03:00
return - EPERM ;
2015-09-14 11:42:37 +03:00
generic_handle_irq_desc ( desc ) ;
2011-05-18 14:48:00 +04:00
return 0 ;
}
2021-05-04 16:24:37 +03:00
/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 *
 *		This function must be called from an IRQ context with irq regs
 *		initialized.
 */
int generic_handle_irq(unsigned int irq)
{
	return handle_irq_desc(irq_to_desc(irq));
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
2011-05-18 14:48:00 +04:00
2022-02-11 21:14:54 +03:00
/**
 * generic_handle_irq_safe - Invoke the handler for a particular irq from any
 *			     context.
 * @irq:	The irq number to handle
 *
 * Returns:	0 on success, a negative value on error.
 *
 * This function can be called from any context (IRQ or process context). It
 * will report an error if not invoked from IRQ context and the irq has been
 * marked to enforce IRQ-context only.
 */
int generic_handle_irq_safe(unsigned int irq)
{
	unsigned long flags;
	int ret;

	/* Disable local interrupts so the handler runs in irq-safe context. */
	local_irq_save(flags);
	ret = handle_irq_desc(irq_to_desc(irq));
	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL_GPL(generic_handle_irq_safe);
2021-05-12 18:18:15 +03:00
# ifdef CONFIG_IRQ_DOMAIN
genirq: Add irq_domain-aware core IRQ handler
Calling irq_find_mapping from outside a irq_{enter,exit} section is
unsafe and produces ugly messages if CONFIG_PROVE_RCU is enabled:
If coming from the idle state, the rcu_read_lock call in irq_find_mapping
will generate an unpleasant warning:
<quote>
===============================
[ INFO: suspicious RCU usage. ]
3.16.0-rc1+ #135 Not tainted
-------------------------------
include/linux/rcupdate.h:871 rcu_read_lock() used illegally while idle!
other info that might help us debug this:
RCU used illegally from idle CPU!
rcu_scheduler_active = 1, debug_locks = 0
RCU used illegally from extended quiescent state!
1 lock held by swapper/0/0:
#0: (rcu_read_lock){......}, at: [<ffffffc00010206c>]
irq_find_mapping+0x4c/0x198
</quote>
As this issue is fairly widespread and involves at least three
different architectures, a possible solution is to add a new
handle_domain_irq entry point into the generic IRQ code that
the interrupt controller code can call.
This new function takes an irq_domain, and calls into irq_find_domain
inside the irq_{enter,exit} block. An additional "lookup" parameter is
used to allow non-domain architecture code to be replaced by this as well.
Interrupt controllers can then be updated to use the new mechanism.
This code is sitting behind a new CONFIG_HANDLE_DOMAIN_IRQ, as not all
architectures implement set_irq_regs (yes, mn10300, I'm looking at you...).
Reported-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Link: https://lkml.kernel.org/r/1409047421-27649-2-git-send-email-marc.zyngier@arm.com
Signed-off-by: Jason Cooper <jason@lakedaemon.net>
2014-08-26 14:03:16 +04:00
/**
 * generic_handle_domain_irq - Invoke the handler for a HW irq belonging
 *			       to a domain.
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 *
 * This function must be called from an IRQ context with irq regs
 * initialized.
 */
int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq)
{
	/* Translate the domain-local hwirq to its descriptor, then dispatch */
	struct irq_desc *desc = irq_resolve_mapping(domain, hwirq);

	return handle_irq_desc(desc);
}
EXPORT_SYMBOL_GPL(generic_handle_domain_irq);
2022-05-09 17:04:08 +03:00
/**
 * generic_handle_domain_irq_safe - Invoke the handler for a HW irq belonging
 *				    to a domain from any context.
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 *
 * Returns:	0 on success, a negative value on error.
 *
 * This function can be called from any context (IRQ or process
 * context). If the interrupt is marked as 'enforce IRQ-context only' then
 * the function must be invoked from hard interrupt context.
 */
int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq)
{
	unsigned long flags;
	int ret;

	/* Run the handler with local interrupts disabled, as in the irq case */
	local_irq_save(flags);
	ret = handle_irq_desc(irq_resolve_mapping(domain, hwirq));
	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL_GPL(generic_handle_domain_irq_safe);
irq: add a (temporary) CONFIG_HANDLE_DOMAIN_IRQ_IRQENTRY
Going forward we want architecture/entry code to perform all the
necessary work to enter/exit IRQ context, with irqchip code merely
handling the mapping of the interrupt to any handler(s). Among other
reasons, this is necessary to consistently fix some longstanding issues
with the ordering of lockdep/RCU/tracing instrumentation which many
architectures get wrong today in their entry code.
Importantly, rcu_irq_{enter,exit}() must be called precisely once per
IRQ exception, so that rcu_is_cpu_rrupt_from_idle() can correctly
identify when an interrupt was taken from an idle context which must be
explicitly preempted. Currently handle_domain_irq() calls
rcu_irq_{enter,exit}() via irq_{enter,exit}(), but entry code needs to
be able to call rcu_irq_{enter,exit}() earlier for correct ordering
across lockdep/RCU/tracing updates for sequences such as:
lockdep_hardirqs_off(CALLER_ADDR0);
rcu_irq_enter();
trace_hardirqs_off_finish();
To permit each architecture to be converted to the new style in turn,
this patch adds a new CONFIG_HANDLE_DOMAIN_IRQ_IRQENTRY selected by all
current users of HANDLE_DOMAIN_IRQ, which gates the existing behaviour.
When CONFIG_HANDLE_DOMAIN_IRQ_IRQENTRY is not selected,
handle_domain_irq() requires entry code to perform the
irq_{enter,exit}() work, with an explicit check for this matching the
style of handle_domain_nmi().
Subsequent patches will:
1) Add the necessary IRQ entry accounting to each architecture in turn,
dropping CONFIG_HANDLE_DOMAIN_IRQ_IRQENTRY from that architecture's
Kconfig.
2) Remove CONFIG_HANDLE_DOMAIN_IRQ_IRQENTRY once it is no longer
selected.
3) Convert irqchip drivers to consistently use
generic_handle_domain_irq() rather than handle_domain_irq().
4) Remove handle_domain_irq() and CONFIG_HANDLE_DOMAIN_IRQ.
... which should leave us with a clear split of responsibility across the
entry and irqchip code, making it possible to perform additional
cleanups and fixes for the aforementioned longstanding issues with entry
code.
There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
2021-10-19 13:12:31 +03:00
/**
 * generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging
 *			       to a domain.
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 *
 * This function must be called from an NMI context with irq regs
 * initialized.
 */
int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq)
{
	/* Catch callers which are not actually in NMI context */
	WARN_ON_ONCE(!in_nmi());
	return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}
# endif
genirq: Add irq_domain-aware core IRQ handler
Calling irq_find_mapping from outside a irq_{enter,exit} section is
unsafe and produces ugly messages if CONFIG_PROVE_RCU is enabled:
If coming from the idle state, the rcu_read_lock call in irq_find_mapping
will generate an unpleasant warning:
<quote>
===============================
[ INFO: suspicious RCU usage. ]
3.16.0-rc1+ #135 Not tainted
-------------------------------
include/linux/rcupdate.h:871 rcu_read_lock() used illegally while idle!
other info that might help us debug this:
RCU used illegally from idle CPU!
rcu_scheduler_active = 1, debug_locks = 0
RCU used illegally from extended quiescent state!
1 lock held by swapper/0/0:
#0: (rcu_read_lock){......}, at: [<ffffffc00010206c>]
irq_find_mapping+0x4c/0x198
</quote>
As this issue is fairly widespread and involves at least three
different architectures, a possible solution is to add a new
handle_domain_irq entry point into the generic IRQ code that
the interrupt controller code can call.
This new function takes an irq_domain, and calls into irq_find_domain
inside the irq_{enter,exit} block. An additional "lookup" parameter is
used to allow non-domain architecture code to be replaced by this as well.
Interrupt controllers can then be updated to use the new mechanism.
This code is sitting behind a new CONFIG_HANDLE_DOMAIN_IRQ, as not all
architectures implement set_irq_regs (yes, mn10300, I'm looking at you...).
Reported-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Link: https://lkml.kernel.org/r/1409047421-27649-2-git-send-email-marc.zyngier@arm.com
Signed-off-by: Jason Cooper <jason@lakedaemon.net>
2014-08-26 14:03:16 +04:00
2010-09-27 19:48:26 +04:00
/* Dynamic interrupt handling */
/**
* irq_free_descs - free irq descriptors
* @ from : Start of descriptor range
* @ cnt : Number of consecutive irqs to free
*/
void irq_free_descs ( unsigned int from , unsigned int cnt )
{
int i ;
if ( from > = nr_irqs | | ( from + cnt ) > nr_irqs )
return ;
2017-09-05 11:12:20 +03:00
mutex_lock ( & sparse_irq_lock ) ;
2010-09-27 19:48:26 +04:00
for ( i = 0 ; i < cnt ; i + + )
free_desc ( from + i ) ;
2010-10-08 14:47:53 +04:00
mutex_unlock ( & sparse_irq_lock ) ;
2010-09-27 19:48:26 +04:00
}
2011-05-18 13:39:04 +04:00
EXPORT_SYMBOL_GPL ( irq_free_descs ) ;
2010-09-27 19:48:26 +04:00
/**
2019-11-13 17:41:33 +03:00
* __irq_alloc_descs - allocate and initialize a range of irq descriptors
2010-09-27 19:48:26 +04:00
* @ irq : Allocate for specific irq number if irq > = 0
* @ from : Start the search from this irq number
* @ cnt : Number of consecutive irqs to allocate .
* @ node : Preferred node on which the irq descriptor should be allocated
2011-08-18 23:19:27 +04:00
* @ owner : Owning module ( can be NULL )
2016-09-14 17:18:49 +03:00
* @ affinity : Optional pointer to an affinity mask array of size @ cnt which
* hints where the irq descriptors should be allocated and which
* default affinities to use
2010-09-27 19:48:26 +04:00
*
* Returns the first irq number or error code
*/
int __ref
2011-07-11 14:17:31 +04:00
__irq_alloc_descs ( int irq , unsigned int from , unsigned int cnt , int node ,
2018-12-04 18:51:20 +03:00
struct module * owner , const struct irq_affinity_desc * affinity )
2010-09-27 19:48:26 +04:00
{
int start , ret ;
if ( ! cnt )
return - EINVAL ;
2011-06-02 21:55:13 +04:00
if ( irq > = 0 ) {
if ( from > irq )
return - EINVAL ;
from = irq ;
2014-04-24 11:50:53 +04:00
} else {
/*
* For interrupts which are freely allocated the
* architecture can force a lower bound to the @ from
* argument . x86 uses this to exclude the GSI space .
*/
from = arch_dynirq_lower_bound ( from ) ;
2011-06-02 21:55:13 +04:00
}
2010-10-08 14:47:53 +04:00
mutex_lock ( & sparse_irq_lock ) ;
2010-09-27 19:48:26 +04:00
2023-05-19 16:49:01 +03:00
start = irq_find_free_area ( from , cnt ) ;
2010-09-27 19:48:26 +04:00
ret = - EEXIST ;
if ( irq > = 0 & & start ! = irq )
2017-09-05 11:12:20 +03:00
goto unlock ;
2010-09-27 19:48:26 +04:00
2011-02-19 22:07:37 +03:00
if ( start + cnt > nr_irqs ) {
ret = irq_expand_nr_irqs ( start + cnt ) ;
2011-02-16 19:12:57 +03:00
if ( ret )
2017-09-05 11:12:20 +03:00
goto unlock ;
2011-02-16 19:12:57 +03:00
}
2017-09-05 11:12:20 +03:00
ret = alloc_descs ( start , cnt , node , affinity , owner ) ;
unlock :
2010-10-08 14:47:53 +04:00
mutex_unlock ( & sparse_irq_lock ) ;
2010-09-27 19:48:26 +04:00
return ret ;
}
2011-07-11 14:17:31 +04:00
EXPORT_SYMBOL_GPL ( __irq_alloc_descs ) ;
2010-09-27 19:48:26 +04:00
2010-09-30 12:45:07 +04:00
/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	unsigned int irq = irq_find_at_or_after(offset);

	return irq;
}
2011-02-12 14:16:16 +03:00
struct irq_desc *
genirq: Add support for per-cpu dev_id interrupts
The ARM GIC interrupt controller offers per CPU interrupts (PPIs),
which are usually used to connect local timers to each core. Each CPU
has its own private interface to the GIC, and only sees the PPIs that
are directly connect to it.
While these timers are separate devices and have a separate interrupt
line to a core, they all use the same IRQ number.
For these devices, request_irq() is not the right API as it assumes
that an IRQ number is visible by a number of CPUs (through the
affinity setting), but makes it very awkward to express that an IRQ
number can be handled by all CPUs, and yet be a different interrupt
line on each CPU, requiring a different dev_id cookie to be passed
back to the handler.
The *_percpu_irq() functions is designed to overcome these
limitations, by providing a per-cpu dev_id vector:
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id);
void free_percpu_irq(unsigned int, void __percpu *);
int setup_percpu_irq(unsigned int irq, struct irqaction *new);
void remove_percpu_irq(unsigned int irq, struct irqaction *act);
void enable_percpu_irq(unsigned int irq);
void disable_percpu_irq(unsigned int irq);
The API has a number of limitations:
- no interrupt sharing
- no threading
- common handler across all the CPUs
Once the interrupt is requested using setup_percpu_irq() or
request_percpu_irq(), it must be enabled by each core that wishes its
local interrupt to be delivered.
Based on an initial patch by Thomas Gleixner.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/1316793788-14500-2-git-send-email-marc.zyngier@arm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2011-09-23 20:03:06 +04:00
__irq_get_desc_lock ( unsigned int irq , unsigned long * flags , bool bus ,
unsigned int check )
2011-02-12 14:16:16 +03:00
{
struct irq_desc * desc = irq_to_desc ( irq ) ;
if ( desc ) {
genirq: Add support for per-cpu dev_id interrupts
The ARM GIC interrupt controller offers per CPU interrupts (PPIs),
which are usually used to connect local timers to each core. Each CPU
has its own private interface to the GIC, and only sees the PPIs that
are directly connect to it.
While these timers are separate devices and have a separate interrupt
line to a core, they all use the same IRQ number.
For these devices, request_irq() is not the right API as it assumes
that an IRQ number is visible by a number of CPUs (through the
affinity setting), but makes it very awkward to express that an IRQ
number can be handled by all CPUs, and yet be a different interrupt
line on each CPU, requiring a different dev_id cookie to be passed
back to the handler.
The *_percpu_irq() functions is designed to overcome these
limitations, by providing a per-cpu dev_id vector:
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id);
void free_percpu_irq(unsigned int, void __percpu *);
int setup_percpu_irq(unsigned int irq, struct irqaction *new);
void remove_percpu_irq(unsigned int irq, struct irqaction *act);
void enable_percpu_irq(unsigned int irq);
void disable_percpu_irq(unsigned int irq);
The API has a number of limitations:
- no interrupt sharing
- no threading
- common handler across all the CPUs
Once the interrupt is requested using setup_percpu_irq() or
request_percpu_irq(), it must be enabled by each core that wishes its
local interrupt to be delivered.
Based on an initial patch by Thomas Gleixner.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/1316793788-14500-2-git-send-email-marc.zyngier@arm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2011-09-23 20:03:06 +04:00
if ( check & _IRQ_DESC_CHECK ) {
if ( ( check & _IRQ_DESC_PERCPU ) & &
! irq_settings_is_per_cpu_devid ( desc ) )
return NULL ;
if ( ! ( check & _IRQ_DESC_PERCPU ) & &
irq_settings_is_per_cpu_devid ( desc ) )
return NULL ;
}
2011-02-12 14:16:16 +03:00
if ( bus )
chip_bus_lock ( desc ) ;
raw_spin_lock_irqsave ( & desc - > lock , * flags ) ;
}
return desc ;
}
/*
 * Counterpart to __irq_get_desc_lock(): drop desc->lock and, if the bus
 * lock was taken, release it as well — reverse order of acquisition.
 */
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
	__releases(&desc->lock)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
2016-04-11 11:57:52 +03:00
int irq_set_percpu_devid_partition ( unsigned int irq ,
const struct cpumask * affinity )
genirq: Add support for per-cpu dev_id interrupts
The ARM GIC interrupt controller offers per CPU interrupts (PPIs),
which are usually used to connect local timers to each core. Each CPU
has its own private interface to the GIC, and only sees the PPIs that
are directly connect to it.
While these timers are separate devices and have a separate interrupt
line to a core, they all use the same IRQ number.
For these devices, request_irq() is not the right API as it assumes
that an IRQ number is visible by a number of CPUs (through the
affinity setting), but makes it very awkward to express that an IRQ
number can be handled by all CPUs, and yet be a different interrupt
line on each CPU, requiring a different dev_id cookie to be passed
back to the handler.
The *_percpu_irq() functions is designed to overcome these
limitations, by providing a per-cpu dev_id vector:
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id);
void free_percpu_irq(unsigned int, void __percpu *);
int setup_percpu_irq(unsigned int irq, struct irqaction *new);
void remove_percpu_irq(unsigned int irq, struct irqaction *act);
void enable_percpu_irq(unsigned int irq);
void disable_percpu_irq(unsigned int irq);
The API has a number of limitations:
- no interrupt sharing
- no threading
- common handler across all the CPUs
Once the interrupt is requested using setup_percpu_irq() or
request_percpu_irq(), it must be enabled by each core that wishes its
local interrupt to be delivered.
Based on an initial patch by Thomas Gleixner.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/1316793788-14500-2-git-send-email-marc.zyngier@arm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2011-09-23 20:03:06 +04:00
{
struct irq_desc * desc = irq_to_desc ( irq ) ;
if ( ! desc )
return - EINVAL ;
if ( desc - > percpu_enabled )
return - EINVAL ;
desc - > percpu_enabled = kzalloc ( sizeof ( * desc - > percpu_enabled ) , GFP_KERNEL ) ;
if ( ! desc - > percpu_enabled )
return - ENOMEM ;
2016-04-11 11:57:52 +03:00
if ( affinity )
desc - > percpu_affinity = affinity ;
else
desc - > percpu_affinity = cpu_possible_mask ;
genirq: Add support for per-cpu dev_id interrupts
The ARM GIC interrupt controller offers per CPU interrupts (PPIs),
which are usually used to connect local timers to each core. Each CPU
has its own private interface to the GIC, and only sees the PPIs that
are directly connect to it.
While these timers are separate devices and have a separate interrupt
line to a core, they all use the same IRQ number.
For these devices, request_irq() is not the right API as it assumes
that an IRQ number is visible by a number of CPUs (through the
affinity setting), but makes it very awkward to express that an IRQ
number can be handled by all CPUs, and yet be a different interrupt
line on each CPU, requiring a different dev_id cookie to be passed
back to the handler.
The *_percpu_irq() functions is designed to overcome these
limitations, by providing a per-cpu dev_id vector:
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id);
void free_percpu_irq(unsigned int, void __percpu *);
int setup_percpu_irq(unsigned int irq, struct irqaction *new);
void remove_percpu_irq(unsigned int irq, struct irqaction *act);
void enable_percpu_irq(unsigned int irq);
void disable_percpu_irq(unsigned int irq);
The API has a number of limitations:
- no interrupt sharing
- no threading
- common handler across all the CPUs
Once the interrupt is requested using setup_percpu_irq() or
request_percpu_irq(), it must be enabled by each core that wishes its
local interrupt to be delivered.
Based on an initial patch by Thomas Gleixner.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/1316793788-14500-2-git-send-email-marc.zyngier@arm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2011-09-23 20:03:06 +04:00
irq_set_percpu_devid_flags ( irq ) ;
return 0 ;
}
2016-04-11 11:57:52 +03:00
/* Per-cpu dev_id setup without a partition: spans all possible CPUs */
int irq_set_percpu_devid(unsigned int irq)
{
	return irq_set_percpu_devid_partition(irq, NULL);
}
int irq_get_percpu_devid_partition ( unsigned int irq , struct cpumask * affinity )
{
struct irq_desc * desc = irq_to_desc ( irq ) ;
if ( ! desc | | ! desc - > percpu_enabled )
return - EINVAL ;
if ( affinity )
cpumask_copy ( affinity , desc - > percpu_affinity ) ;
return 0 ;
}
2016-07-25 18:07:10 +03:00
EXPORT_SYMBOL_GPL ( irq_get_percpu_devid_partition ) ;
2016-04-11 11:57:52 +03:00
2014-02-24 01:40:14 +04:00
/* Bump this CPU's interrupt count for @irq */
void kstat_incr_irq_this_cpu(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	kstat_incr_irqs_this_cpu(desc);
}
2014-12-12 01:01:41 +03:00
/**
* kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
* @ irq : The interrupt number
* @ cpu : The cpu number
*
* Returns the sum of interrupt counts on @ cpu since boot for
* @ irq . The caller must ensure that the interrupt is not removed
* concurrently .
*/
2010-09-22 19:09:43 +04:00
unsigned int kstat_irqs_cpu ( unsigned int irq , int cpu )
{
struct irq_desc * desc = irq_to_desc ( irq ) ;
2011-01-14 02:45:38 +03:00
return desc & & desc - > kstat_irqs ?
* per_cpu_ptr ( desc - > kstat_irqs , cpu ) : 0 ;
2010-09-22 19:09:43 +04:00
}
2010-10-28 02:34:15 +04:00
2019-07-05 10:56:20 +03:00
static bool irq_is_nmi ( struct irq_desc * desc )
{
return desc - > istate & IRQS_NMI ;
}
2020-12-10 22:25:43 +03:00
static unsigned int kstat_irqs ( unsigned int irq )
2010-10-28 02:34:15 +04:00
{
struct irq_desc * desc = irq_to_desc ( irq ) ;
2015-05-03 11:48:50 +03:00
unsigned int sum = 0 ;
2019-02-08 16:48:03 +03:00
int cpu ;
2010-10-28 02:34:15 +04:00
2011-01-14 02:45:38 +03:00
if ( ! desc | | ! desc - > kstat_irqs )
2010-10-28 02:34:15 +04:00
return 0 ;
2019-02-08 16:48:03 +03:00
if ( ! irq_settings_is_per_cpu_devid ( desc ) & &
2019-07-05 10:56:20 +03:00
! irq_settings_is_per_cpu ( desc ) & &
! irq_is_nmi ( desc ) )
2020-12-10 22:25:41 +03:00
return data_race ( desc - > tot_count ) ;
2019-02-08 16:48:03 +03:00
2010-10-28 02:34:15 +04:00
for_each_possible_cpu ( cpu )
2020-12-10 22:25:41 +03:00
sum + = data_race ( * per_cpu_ptr ( desc - > kstat_irqs , cpu ) ) ;
2010-10-28 02:34:15 +04:00
return sum ;
}
2014-12-12 01:01:41 +03:00
/**
 * kstat_irqs_usr - Get the statistics for an interrupt from thread context
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for @irq.
 *
 * It uses rcu to protect the access since a concurrent removal of an
 * interrupt descriptor is observing an rcu grace period before
 * delayed_free_desc()/irq_kobj_release().
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int cnt;

	rcu_read_lock();
	cnt = kstat_irqs(irq);
	rcu_read_unlock();

	return cnt;
}
2020-12-10 22:25:39 +03:00
#ifdef CONFIG_LOCKDEP
/* Install caller-supplied lockdep classes on desc->lock and request_mutex */
void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
			     struct lock_class_key *request_class)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	lockdep_set_class(&desc->lock, lock_class);
	lockdep_set_class(&desc->request_mutex, request_class);
}
EXPORT_SYMBOL_GPL(__irq_set_lockdep_class);
#endif