// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Intel Corp.
 * Author: Jiang Liu <jiang.liu@linux.intel.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file contains common code to support Message Signaled Interrupts for
 * PCI compatible and non PCI compatible devices.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/pci.h>

#include "internals.h"

static inline int msi_sysfs_create_group(struct device *dev);

/**
 * msi_alloc_desc - Allocate an initialized msi_desc
 * @dev:	Pointer to the device for which this is allocated
 * @nvec:	The number of vectors used in this entry
 * @affinity:	Optional pointer to an affinity mask array size of @nvec
 *
 * If @affinity is not %NULL then an affinity array[@nvec] is allocated
 * and the affinity masks and flags from @affinity are copied.
 *
 * Return: pointer to allocated &msi_desc on success or %NULL on failure
 */
static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
				       const struct irq_affinity_desc *affinity)
{
	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);

	if (!desc)
		return NULL;

	desc->dev = dev;
	desc->nvec_used = nvec;
	if (affinity) {
		desc->affinity = kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL);
		if (!desc->affinity) {
			kfree(desc);
			return NULL;
		}
	}
	return desc;
}

static void msi_free_desc(struct msi_desc *desc)
{
	kfree(desc->affinity);
	kfree(desc);
}

static int msi_insert_desc(struct msi_device_data *md, struct msi_desc *desc, unsigned int index)
{
	struct xarray *xa = &md->__domains[MSI_DEFAULT_DOMAIN].store;
	int ret;

	desc->msi_index = index;
	ret = xa_insert(xa, index, desc, GFP_KERNEL);
	if (ret)
		msi_free_desc(desc);
	return ret;
}

/**
 * msi_add_msi_desc - Allocate and initialize a MSI descriptor
 * @dev:	Pointer to the device for which the descriptor is allocated
 * @init_desc:	Pointer to an MSI descriptor to initialize the new descriptor
 *
 * Return: 0 on success or an appropriate failure code.
 */
int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
	struct msi_desc *desc;

	lockdep_assert_held(&dev->msi.data->mutex);

	desc = msi_alloc_desc(dev, init_desc->nvec_used, init_desc->affinity);
	if (!desc)
		return -ENOMEM;

	/* Copy type specific data to the new descriptor. */
	desc->pci = init_desc->pci;

	return msi_insert_desc(dev->msi.data, desc, init_desc->msi_index);
}
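
/*
 * Example (illustrative sketch only, not part of this file's API surface):
 * a bus-level MSI implementation would typically fill a template descriptor
 * on the stack and add it while holding the descriptor mutex. The device
 * pointer "dev" and the chosen index are placeholders.
 *
 *	struct msi_desc desc = {
 *		.nvec_used	= 1,
 *		.msi_index	= 0,
 *	};
 *	int ret;
 *
 *	msi_lock_descs(dev);
 *	ret = msi_add_msi_desc(dev, &desc);
 *	msi_unlock_descs(dev);
 */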

/**
 * msi_add_simple_msi_descs - Allocate and initialize MSI descriptors
 * @dev:	Pointer to the device for which the descriptors are allocated
 * @index:	Index for the first MSI descriptor
 * @ndesc:	Number of descriptors to allocate
 *
 * Return: 0 on success or an appropriate failure code.
 */
static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc)
{
	unsigned int idx, last = index + ndesc - 1;
	struct msi_desc *desc;
	int ret;

	lockdep_assert_held(&dev->msi.data->mutex);

	for (idx = index; idx <= last; idx++) {
		desc = msi_alloc_desc(dev, 1, NULL);
		if (!desc)
			goto fail_mem;
		ret = msi_insert_desc(dev->msi.data, desc, idx);
		if (ret)
			goto fail;
	}
	return 0;

fail_mem:
	ret = -ENOMEM;
fail:
	msi_free_msi_descs_range(dev, index, last);
	return ret;
}

static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
{
	switch (filter) {
	case MSI_DESC_ALL:
		return true;
	case MSI_DESC_NOTASSOCIATED:
		return !desc->irq;
	case MSI_DESC_ASSOCIATED:
		return !!desc->irq;
	}
	WARN_ON_ONCE(1);
	return false;
}

/**
 * msi_free_msi_descs_range - Free MSI descriptors of a device
 * @dev:		Device to free the descriptors
 * @first_index:	Index to start freeing from
 * @last_index:		Last index to be freed
 */
void msi_free_msi_descs_range(struct device *dev, unsigned int first_index,
			      unsigned int last_index)
{
	struct xarray *xa = &dev->msi.data->__domains[MSI_DEFAULT_DOMAIN].store;
	struct msi_desc *desc;
	unsigned long idx;

	lockdep_assert_held(&dev->msi.data->mutex);

	xa_for_each_range(xa, idx, desc, first_index, last_index) {
		xa_erase(xa, idx);

		/* Leak the descriptor when it is still referenced */
		if (WARN_ON_ONCE(msi_desc_match(desc, MSI_DESC_ASSOCIATED)))
			continue;
		msi_free_desc(desc);
	}
}

void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}

void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);

static void msi_device_data_release(struct device *dev, void *res)
{
	struct msi_device_data *md = res;
	int i;

	for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++) {
		WARN_ON_ONCE(!xa_empty(&md->__domains[i].store));
		xa_destroy(&md->__domains[i].store);
	}
	dev->msi.data = NULL;
}

/**
 * msi_setup_device_data - Setup MSI device data
 * @dev:	Device for which MSI device data should be set up
 *
 * Return: 0 on success, appropriate error code otherwise
 *
 * This can be called more than once for @dev. If the MSI device data is
 * already allocated the call succeeds. The allocated memory is
 * automatically released when the device is destroyed.
 */
int msi_setup_device_data(struct device *dev)
{
	struct msi_device_data *md;
	int ret, i;

	if (dev->msi.data)
		return 0;

	md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	ret = msi_sysfs_create_group(dev);
	if (ret) {
		devres_free(md);
		return ret;
	}

	for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++)
		xa_init(&md->__domains[i].store);

	mutex_init(&md->mutex);
	dev->msi.data = md;
	devres_add(dev, md);
	return 0;
}

/**
 * msi_lock_descs - Lock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 */
void msi_lock_descs(struct device *dev)
{
	mutex_lock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_lock_descs);

/**
 * msi_unlock_descs - Unlock the MSI descriptor storage of a device
 * @dev:	Device to operate on
 */
void msi_unlock_descs(struct device *dev)
{
	/* Invalidate the index which was cached by the iterator */
	dev->msi.data->__iter_idx = MSI_MAX_INDEX;
	mutex_unlock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_unlock_descs);

static struct msi_desc *msi_find_desc(struct msi_device_data *md, enum msi_desc_filter filter)
{
	struct xarray *xa = &md->__domains[MSI_DEFAULT_DOMAIN].store;
	struct msi_desc *desc;

	xa_for_each_start(xa, md->__iter_idx, desc, md->__iter_idx) {
		if (msi_desc_match(desc, filter))
			return desc;
	}
	md->__iter_idx = MSI_MAX_INDEX;
	return NULL;
}

/**
 * msi_first_desc - Get the first MSI descriptor of a device
 * @dev:	Device to operate on
 * @filter:	Descriptor state filter
 *
 * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
 * must be invoked before the call.
 *
 * Return: Pointer to the first MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_device_data *md = dev->msi.data;

	if (WARN_ON_ONCE(!md))
		return NULL;

	lockdep_assert_held(&md->mutex);

	md->__iter_idx = 0;
	return msi_find_desc(md, filter);
}
EXPORT_SYMBOL_GPL(msi_first_desc);

/**
 * msi_next_desc - Get the next MSI descriptor of a device
 * @dev:	Device to operate on
 * @filter:	Descriptor state filter
 *
 * The first invocation of msi_next_desc() has to be preceded by a
 * successful invocation of msi_first_desc(). Consecutive invocations are
 * only valid if the previous one was successful. All these operations have
 * to be done within the same MSI mutex held region.
 *
 * Return: Pointer to the next MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_device_data *md = dev->msi.data;

	if (WARN_ON_ONCE(!md))
		return NULL;

	lockdep_assert_held(&md->mutex);

	if (md->__iter_idx >= (unsigned long)MSI_MAX_INDEX)
		return NULL;

	md->__iter_idx++;
	return msi_find_desc(md, filter);
}
EXPORT_SYMBOL_GPL(msi_next_desc);
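
/*
 * Example (illustrative sketch only): a driver that has already allocated
 * its MSI vectors can walk the associated descriptors with the
 * msi_for_each_desc() iterator, which is built on msi_first_desc() and
 * msi_next_desc() and therefore requires the descriptor mutex to be held.
 * The device pointer "dev" stands for the driver's own struct device.
 *
 *	struct msi_desc *desc;
 *
 *	msi_lock_descs(dev);
 *	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
 *		dev_dbg(dev, "MSI index %u -> Linux irq %u\n",
 *			desc->msi_index, desc->irq);
 *	msi_unlock_descs(dev);
 */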

/**
 * msi_get_virq - Return Linux interrupt number of a MSI interrupt
 * @dev:	Device to operate on
 * @index:	MSI interrupt index to look for (0-based)
 *
 * Return: The Linux interrupt number on success (> 0), 0 if not found
 */
unsigned int msi_get_virq(struct device *dev, unsigned int index)
{
	struct msi_desc *desc;
	unsigned int ret = 0;
	struct xarray *xa;
	bool pcimsi;

	if (!dev->msi.data)
		return 0;

	pcimsi = dev_is_pci(dev) ? to_pci_dev(dev)->msi_enabled : false;

	msi_lock_descs(dev);
	xa = &dev->msi.data->__domains[MSI_DEFAULT_DOMAIN].store;
	desc = xa_load(xa, pcimsi ? 0 : index);
	if (desc && desc->irq) {
		/*
		 * PCI-MSI has only one descriptor for multiple interrupts.
		 * PCI-MSIX and platform MSI use a descriptor per
		 * interrupt.
		 */
		if (pcimsi) {
			if (index < desc->nvec_used)
				ret = desc->irq + index;
		} else {
			ret = desc->irq;
		}
	}
	msi_unlock_descs(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(msi_get_virq);
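
/*
 * Example (illustrative sketch only): after a driver has allocated its MSI
 * vectors, msi_get_virq() maps a device-relative MSI index to the Linux
 * interrupt number, which can then be handed to request_irq(). The handler
 * my_irq_handler and the device pointer "dev" are placeholders.
 *
 *	unsigned int virq = msi_get_virq(dev, 0);
 *
 *	if (!virq)
 *		return -ENOENT;
 *	return request_irq(virq, my_irq_handler, 0, dev_name(dev), dev);
 */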

#ifdef CONFIG_SYSFS
static struct attribute *msi_dev_attrs[] = {
	NULL
};

static const struct attribute_group msi_irqs_group = {
	.name	= "msi_irqs",
	.attrs	= msi_dev_attrs,
};

static inline int msi_sysfs_create_group(struct device *dev)
{
	return devm_device_add_group(dev, &msi_irqs_group);
}

static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	/* MSI vs. MSIX is per device not per interrupt */
	bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;

	return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
}

static void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs = desc->sysfs_attrs;
	int i;

	if (!attrs)
		return;

	desc->sysfs_attrs = NULL;
	for (i = 0; i < desc->nvec_used; i++) {
		if (attrs[i].show)
			sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		kfree(attrs[i].attr.name);
	}
	kfree(attrs);
}

static int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs;
	int ret, i;

	attrs = kcalloc(desc->nvec_used, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	desc->sysfs_attrs = attrs;
	for (i = 0; i < desc->nvec_used; i++) {
		sysfs_attr_init(&attrs[i].attr);
		attrs[i].attr.name = kasprintf(GFP_KERNEL, "%d", desc->irq + i);
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto fail;
		}

		attrs[i].attr.mode = 0444;
		attrs[i].show = msi_mode_show;

		ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		if (ret) {
			attrs[i].show = NULL;
			goto fail;
		}
	}
	return 0;

fail:
	msi_sysfs_remove_desc(dev, desc);
	return ret;
}

#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
/**
 * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
 * @dev:	The device (PCI, platform etc) which will get sysfs entries
 */
int msi_device_populate_sysfs(struct device *dev)
{
	struct msi_desc *desc;
	int ret;

	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		if (desc->sysfs_attrs)
			continue;
		ret = msi_sysfs_populate_desc(dev, desc);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
 * @dev:	The device (PCI, platform etc) for which to remove
 *		sysfs entries
 */
void msi_device_destroy_sysfs(struct device *dev)
{
	struct msi_desc *desc;

	msi_for_each_desc(desc, dev, MSI_DESC_ALL)
		msi_sysfs_remove_desc(dev, desc);
}
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
#else /* CONFIG_SYSFS */
static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }
static inline void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc) { }
#endif /* !CONFIG_SYSFS */

static int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec);
static void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);

static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}

static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message and
	 * not advertised that it is level-capable, signal the breakage.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}

/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data:	The irq data associated to the interrupt
 * @mask:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 *
 * Return: IRQ_SET_MASK_* result code
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg[2] = { [1] = { }, };
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
		msi_check_level(irq_data->domain, msg);
		irq_chip_write_msi_msg(irq_data, msg);
	}

	return ret;
}

static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg[2] = { [1] = { }, };

	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}

static void msi_domain_deactivate(struct irq_domain *domain,
				  struct irq_data *irq_data)
{
	struct msi_msg msg[2];

	memset(msg, 0, sizeof(msg));
	irq_chip_write_msi_msg(irq_data, msg);
}

static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
	int i, ret;

	if (irq_find_mapping(domain, hwirq) > 0)
		return -EEXIST;

	if (domain->parent) {
		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
		if (ret < 0) {
			if (ops->msi_free) {
				for (i--; i > 0; i--)
					ops->msi_free(domain, info, virq + i);
			}
			irq_domain_free_irqs_top(domain, virq, nr_irqs);
			return ret;
		}
	}

	return 0;
}

static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct msi_domain_info *info = domain->host_data;
	int i;

	if (info->ops->msi_free) {
		for (i = 0; i < nr_irqs; i++)
			info->ops->msi_free(domain, info, virq + i);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};

static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}

static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}

static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}

static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq		= msi_domain_ops_get_hwirq,
	.msi_init		= msi_domain_ops_init,
	.msi_prepare		= msi_domain_ops_prepare,
	.set_desc		= msi_domain_ops_set_desc,
	.domain_alloc_irqs	= __msi_domain_alloc_irqs,
	.domain_free_irqs	= __msi_domain_free_irqs,
};

static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (ops->domain_alloc_irqs == NULL)
		ops->domain_alloc_irqs = msi_domain_ops_default.domain_alloc_irqs;
	if (ops->domain_free_irqs == NULL)
		ops->domain_free_irqs = msi_domain_ops_default.domain_free_irqs;

	if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
		return;

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}

static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}

/**
 * msi_create_irq_domain - Create an MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 *
 * Return: pointer to the created &struct irq_domain or %NULL on failure
 */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	struct irq_domain *domain;

	msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
					     fwnode, &msi_domain_ops, info);

	if (domain) {
		if (!domain->name && info->chip)
			domain->name = info->chip->name;
		irq_domain_update_bus_token(domain, info->bus_token);
	}

	return domain;
}
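
/*
 * Example (illustrative sketch only): an interrupt controller driver would
 * typically create an MSI domain by filling a struct msi_domain_info and
 * calling msi_create_irq_domain() with its fwnode and the parent domain.
 * The names my_msi_chip, my_write_msi_msg, my_msi_info, my_fwnode and
 * my_parent are placeholders, not definitions from this file.
 *
 *	static struct irq_chip my_msi_chip = {
 *		.name			= "my-msi",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_write_msi_msg	= my_write_msi_msg,
 *	};
 *
 *	static struct msi_domain_info my_msi_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.chip	= &my_msi_chip,
 *	};
 *
 *	domain = msi_create_irq_domain(my_fwnode, &my_msi_info, my_parent);
 *	if (!domain)
 *		return -ENOMEM;
 */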

int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;

	return ops->msi_prepare(domain, dev, nvec, arg);
}

int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq_base, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	struct xarray *xa;
	int ret, virq;

	msi_lock_descs(dev);
	ret = msi_add_simple_msi_descs(dev, virq_base, nvec);
	if (ret)
		goto unlock;

	xa = &dev->msi.data->__domains[MSI_DEFAULT_DOMAIN].store;

	for (virq = virq_base; virq < virq_base + nvec; virq++) {
		desc = xa_load(xa, virq);
		desc->irq = virq;

		ops->set_desc(arg, desc);
		ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
		if (ret)
			goto fail;

		irq_set_msi_desc(virq, desc);
	}
	msi_unlock_descs(dev);
	return 0;

fail:
	for (--virq; virq >= virq_base; virq--)
		irq_domain_free_irqs_common(domain, virq, 1);
	msi_free_msi_descs_range(dev, virq_base, virq_base + nvec - 1);
unlock:
	msi_unlock_descs(dev);
	return ret;
}

/*
 * Carefully check whether the device can use reservation mode. If
 * reservation mode is enabled then the early activation will assign a
 * dummy vector to the device. If the PCI/MSI device does not support
 * masking of the entry then this can result in spurious interrupts when
 * the device driver is not absolutely careful. But even then a malfunction
 * of the hardware could result in a spurious interrupt on the dummy vector
 * and render the device unusable. If the entry can be masked then the core
 * logic will prevent the spurious interrupt and reservation mode can be
 * used. For now reservation mode is restricted to PCI/MSI.
 */
static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	switch (domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		break;
	default:
		return false;
	}

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSIX supports
	 * masking and MSI does so when the can_mask attribute is set.
	 */
	desc = msi_first_desc(dev, MSI_DESC_ALL);
	return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
}

static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
			       int allocated)
{
	switch (domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		if (IS_ENABLED(CONFIG_PCI_MSI))
			break;
		fallthrough;
	default:
		return -ENOSPC;
	}

	/* Let a failed PCI multi MSI allocation retry */
	if (desc->nvec_used > 1)
		return 1;

	/* If there was a successful allocation let the caller know */
	return allocated ? allocated : -ENOSPC;
}

#define VIRQ_CAN_RESERVE	0x01
#define VIRQ_ACTIVATE		0x02
#define VIRQ_NOMASK_QUIRK	0x04

static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
{
	struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
	int ret;

	if (!(vflags & VIRQ_CAN_RESERVE)) {
		irqd_clr_can_reserve(irqd);
		if (vflags & VIRQ_NOMASK_QUIRK)
			irqd_set_msi_nomask_quirk(irqd);

		/*
		 * If the interrupt is managed but no CPU is available to
		 * service it, shut it down until better times. Note that
		 * we only do this on the !RESERVE path as x86 (the only
		 * architecture using this flag) deals with this in a
		 * different way by using a catch-all vector.
		 */
		if ((vflags & VIRQ_ACTIVATE) &&
		    irqd_affinity_is_managed(irqd) &&
		    !cpumask_intersects(irq_data_get_affinity_mask(irqd),
					cpu_online_mask)) {
			irqd_set_managed_shutdown(irqd);
			return 0;
		}
	}

	if (!(vflags & VIRQ_ACTIVATE))
		return 0;

	ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
	if (ret)
		return ret;
	/*
	 * If the interrupt uses reservation mode, clear the activated bit
	 * so request_irq() will assign the final vector.
	 */
	if (vflags & VIRQ_CAN_RESERVE)
		irqd_clr_activated(irqd);
	return 0;
}

static int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
				   int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	msi_alloc_info_t arg = { };
	unsigned int vflags = 0;
	struct msi_desc *desc;
	int allocated = 0;
	int i, ret, virq;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	/*
	 * This flag is set by the PCI layer as we need to activate
	 * the MSI entries before the PCI layer enables MSI in the
	 * card. Otherwise the card latches a random msi message.
	 */
	if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
		vflags |= VIRQ_ACTIVATE;

	/*
	 * Interrupt can use a reserved vector and will not occupy
	 * a real device vector until the interrupt is requested.
	 */
	if (msi_check_reservation_mode(domain, info, dev)) {
		vflags |= VIRQ_CAN_RESERVE;
		/*
		 * MSI affinity setting requires a special quirk (X86) when
		 * reservation mode is active.
		 */
		if (info->flags & MSI_FLAG_NOMASK_QUIRK)
			vflags |= VIRQ_NOMASK_QUIRK;
	}

	msi_for_each_desc(desc, dev, MSI_DESC_NOTASSOCIATED) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0)
			return msi_handle_pci_fail(domain, desc, allocated);

		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
			ret = msi_init_virq(domain, virq + i, vflags);
			if (ret)
				return ret;
		}
		if (info->flags & MSI_FLAG_DEV_SYSFS) {
			ret = msi_sysfs_populate_desc(dev, desc);
			if (ret)
				return ret;
		}
		allocated++;
	}
	return 0;
}

static int msi_domain_add_simple_msi_descs(struct msi_domain_info *info,
					   struct device *dev,
					   unsigned int num_descs)
{
	if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
		return 0;

	return msi_add_simple_msi_descs(dev, 0, num_descs);
}

/**
 * msi_domain_alloc_irqs_descs_locked - Allocate interrupts from a MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own vector
 * allocation/free.
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev,
				       int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	lockdep_assert_held(&dev->msi.data->mutex);

	if (WARN_ON_ONCE(irq_domain_is_msi_parent(domain))) {
		ret = -EINVAL;
		goto free;
	}

	/* Frees allocated descriptors in case of failure. */
	ret = msi_domain_add_simple_msi_descs(info, dev, nvec);
	if (ret)
		goto free;

	ret = ops->domain_alloc_irqs(domain, dev, nvec);
	if (!ret)
		return 0;
free:
	msi_domain_free_irqs_descs_locked(domain, dev);
	return ret;
}

/**
 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec)
{
	int ret;

	msi_lock_descs(dev);
	ret = msi_domain_alloc_irqs_descs_locked(domain, dev, nvec);
	msi_unlock_descs(dev);
	return ret;
}

static void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct msi_domain_info *info = domain->host_data;
	struct irq_data *irqd;
	struct msi_desc *desc;
	int i;

	/* Only handle MSI entries which have an interrupt associated */
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		/* Make sure all interrupts are deactivated */
		for (i = 0; i < desc->nvec_used; i++) {
			irqd = irq_domain_get_irq_data(domain, desc->irq + i);
			if (irqd && irqd_is_activated(irqd))
				irq_domain_deactivate_irq(irqd);
		}

		irq_domain_free_irqs(desc->irq, desc->nvec_used);
		if (info->flags & MSI_FLAG_DEV_SYSFS)
			msi_sysfs_remove_desc(dev, desc);
		desc->irq = 0;
	}
}

static void msi_domain_free_msi_descs(struct msi_domain_info *info,
				      struct device *dev)
{
	if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
		msi_free_msi_descs(dev);
}

/**
 * msi_domain_free_irqs_descs_locked - Free interrupts from a MSI interrupt @domain associated to @dev
 * @domain:	The domain managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own vector
 * allocation.
 */
void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;

	lockdep_assert_held(&dev->msi.data->mutex);

	if (WARN_ON_ONCE(irq_domain_is_msi_parent(domain)))
		return;

	ops->domain_free_irqs(domain, dev);
	if (ops->msi_post_free)
		ops->msi_post_free(domain, dev);
	msi_domain_free_msi_descs(info, dev);
}

/**
 * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
 * @domain:	The domain managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	msi_lock_descs(dev);
	msi_domain_free_irqs_descs_locked(domain, dev);
	msi_unlock_descs(dev);
}
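
/*
 * Example (illustrative sketch only): a bus or driver layer using the plain
 * (non descs-locked) API allocates its vectors once, requests them via
 * msi_get_virq(), and releases everything when it shuts down. The variables
 * my_domain, dev and nvec are placeholders.
 *
 *	ret = msi_domain_alloc_irqs(my_domain, dev, nvec);
 *	if (ret)
 *		return ret;
 *
 *	... request_irq() on the vectors returned by msi_get_virq() ...
 *
 *	msi_domain_free_irqs(my_domain, dev);
 */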

/**
 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
 * @domain:	The interrupt domain to retrieve data from
 *
 * Return: the pointer to the msi_domain_info stored in @domain->host_data.
 */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
	return (struct msi_domain_info *)domain->host_data;
}