2019-02-12 18:39:46 +01:00
// SPDX-License-Identifier: GPL-2.0
# define KMSG_COMPONENT "zpci"
# define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
# include <linux/kernel.h>
# include <linux/irq.h>
# include <linux/kernel_stat.h>
# include <linux/pci.h>
# include <linux/msi.h>
2018-09-27 13:57:12 +02:00
# include <linux/smp.h>
2019-02-12 18:39:46 +01:00
# include <asm/isc.h>
# include <asm/airq.h>
2022-06-06 16:33:09 -04:00
# include <asm/tpi.h>
2019-02-12 18:39:46 +01:00
2018-09-27 13:57:12 +02:00
/*
 * Selected IRQ delivery mode for this boot: floating (adapter summary +
 * per-function vectors) or CPU-directed (per-CPU vectors). Chosen once in
 * zpci_irq_init() and never changed afterwards.
 */
static enum {FLOATING, DIRECTED} irq_delivery;

/*
 * summary bit vector
 * FLOATING - summary bit per function
 * DIRECTED - summary bit per cpu (only used in fallback path)
 */
static struct airq_iv *zpci_sbv;

/*
 * interrupt bit vectors
 * FLOATING - interrupt bit vector per function
 * DIRECTED - interrupt bit vector per cpu
 */
static struct airq_iv **zpci_ibv;
2019-02-12 18:39:46 +01:00
2020-12-10 15:28:05 +01:00
/*
 * Modify PCI: Register floating adapter interruptions.
 *
 * Builds a format-0 FIB describing the device's interrupt bit vector and
 * its slot in the global summary bit vector, then issues the modify-PCI
 * function-controls instruction to register them with the hardware.
 * Returns 0 on success, -EIO if the instruction fails.
 */
static int zpci_set_airq(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
	struct zpci_fib fib = {0};
	u8 status;

	fib.fmt0.isc = PCI_ISC;
	fib.fmt0.sum = 1;	/* enable summary notifications */
	fib.fmt0.noi = airq_iv_end(zdev->aibv);
	fib.fmt0.aibv = virt_to_phys(zdev->aibv->vector);
	fib.fmt0.aibvo = 0;	/* each zdev has its own interrupt vector */
	/* Address of the 64-bit word in the summary vector holding this
	 * device's summary bit; aisbo below is the bit offset within it. */
	fib.fmt0.aisb = virt_to_phys(zpci_sbv->vector) + (zdev->aisb / 64) * 8;
	fib.fmt0.aisbo = zdev->aisb & 63;
	fib.gd = zdev->gisa;

	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}
2020-12-10 15:28:05 +01:00
/*
 * Modify PCI: Unregister floating adapter interruptions.
 *
 * Returns 0 on success or when the registration is already gone,
 * -EIO on any other failure.
 */
static int zpci_clear_airq(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3 || (cc == 1 && status == 24))
		/* Function already gone or IRQs already deregistered. */
		cc = 0;

	return cc ? -EIO : 0;
}
2018-09-27 13:57:12 +02:00
/*
 * Modify PCI: Register CPU directed interruptions.
 *
 * Uses a format-1 FIB: only the number of interrupts and the first bit
 * offset into the shared per-CPU vectors are communicated; the vectors
 * themselves were registered per CPU in cpu_enable_directed_irq().
 * Returns 0 on success, -EIO on failure.
 */
static int zpci_set_directed_irq(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT_D);
	struct zpci_fib fib = {0};
	u8 status;

	fib.fmt = 1;
	fib.fmt1.noi = zdev->msi_nr_irqs;
	fib.fmt1.dibvo = zdev->msi_first_bit;
	fib.gd = zdev->gisa;

	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}
/*
 * Modify PCI: Unregister CPU directed interruptions.
 *
 * Returns 0 on success or when the registration is already gone,
 * -EIO on any other failure.
 */
static int zpci_clear_directed_irq(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT_D);
	struct zpci_fib fib = {0};
	u8 cc, status;

	fib.fmt = 1;
	fib.gd = zdev->gisa;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3 || (cc == 1 && status == 24))
		/* Function already gone or IRQs already deregistered. */
		cc = 0;

	return cc ? -EIO : 0;
}
2020-12-10 15:28:05 +01:00
/* Register adapter interruptions */
2022-03-08 10:49:58 +01:00
static int zpci_set_irq ( struct zpci_dev * zdev )
2020-12-10 15:28:05 +01:00
{
int rc ;
if ( irq_delivery = = DIRECTED )
rc = zpci_set_directed_irq ( zdev ) ;
else
rc = zpci_set_airq ( zdev ) ;
if ( ! rc )
zdev - > irqs_registered = 1 ;
return rc ;
}
/* Clear adapter interruptions */
2022-03-08 10:49:58 +01:00
static int zpci_clear_irq ( struct zpci_dev * zdev )
2020-12-10 15:28:05 +01:00
{
int rc ;
if ( irq_delivery = = DIRECTED )
rc = zpci_clear_directed_irq ( zdev ) ;
else
rc = zpci_clear_airq ( zdev ) ;
if ( ! rc )
zdev - > irqs_registered = 0 ;
return rc ;
}
2018-09-27 13:57:12 +02:00
/*
 * irq_chip callback: retarget a directed MSI to a different CPU.
 *
 * In directed mode the target CPU address is encoded directly in the MSI
 * address: bits 8-23 of address_lo carry the CPU address. Rewrite only
 * that field and leave the rest of the message untouched.
 */
static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *dest,
				 bool force)
{
	struct msi_desc *entry = irq_data_get_msi_desc(data);
	struct msi_msg msg = entry->msg;
	int cpu_addr = smp_cpu_get_cpu_address(cpumask_first(dest));

	msg.address_lo &= 0xff0000ff;	/* clear old CPU address field */
	msg.address_lo |= (cpu_addr << 8);
	pci_write_msi_msg(data->irq, &msg);

	return IRQ_SET_MASK_OK;
}
2019-02-12 18:39:46 +01:00
/*
 * irq_chip for zPCI MSIs. irq_set_affinity is filled in at runtime by
 * zpci_directed_irq_init() when directed delivery is in use.
 */
static struct irq_chip zpci_irq_chip = {
	.name = "PCI-MSI",
	.irq_unmask = pci_msi_unmask_irq,
	.irq_mask = pci_msi_mask_irq,
};
2018-09-27 13:57:12 +02:00
/*
 * Drain this CPU's directed interrupt bit vector and dispatch each
 * pending bit to its Linux IRQ.
 *
 * @rescan: when true, after the first empty scan re-enable directed
 *	    interrupt delivery and scan once more to close the race with
 *	    bits set between the last scan and re-enablement.
 */
static void zpci_handle_cpu_local_irq(bool rescan)
{
	struct airq_iv *dibv = zpci_ibv[smp_processor_id()];
	union zpci_sic_iib iib = {{0}};
	unsigned long bit;
	int irqs_on = 0;

	for (bit = 0;;) {
		/* Scan the directed IRQ bit vector */
		bit = airq_iv_scan(dibv, bit, airq_iv_end(dibv));
		if (bit == -1UL) {
			if (!rescan || irqs_on++)
				/* End of second scan with interrupts on. */
				break;
			/* First scan complete, reenable interrupts. */
			if (zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &iib))
				break;
			bit = 0;
			continue;
		}
		inc_irq_stat(IRQIO_MSI);
		/* The vector's data slot holds the Linux IRQ number. */
		generic_handle_irq(airq_iv_get_data(dibv, bit));
	}
}
/*
 * Per-CPU state for the floating fallback path: a CSD used to kick the
 * target CPU and a counter of pending kicks so only one CSD is in
 * flight per CPU at a time.
 */
struct cpu_irq_data {
	call_single_data_t csd;
	atomic_t scheduled;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_irq_data, irq_data);
/*
 * CSD callback run on the target CPU: drain the local directed vector
 * repeatedly until the 'scheduled' counter (incremented once per kick by
 * zpci_handle_fallback_irq()) drops to zero, so no kick is lost.
 */
static void zpci_handle_remote_irq(void *data)
{
	atomic_t *scheduled = data;

	do {
		zpci_handle_cpu_local_irq(false);
	} while (atomic_dec_return(scheduled));
}
/*
 * Fallback path for directed mode: a floating interrupt arrived whose
 * summary vector has one bit per CPU. For each CPU with a pending bit,
 * schedule an async call on that CPU to drain its local vector. The
 * 'scheduled' counter ensures at most one CSD per CPU is in flight;
 * extra kicks are absorbed by the drain loop in zpci_handle_remote_irq().
 */
static void zpci_handle_fallback_irq(void)
{
	struct cpu_irq_data *cpu_data;
	union zpci_sic_iib iib = {{0}};
	unsigned long cpu;
	int irqs_on = 0;

	for (cpu = 0;;) {
		cpu = airq_iv_scan(zpci_sbv, cpu, airq_iv_end(zpci_sbv));
		if (cpu == -1UL) {
			if (irqs_on++)
				/* End of second scan with interrupts on. */
				break;
			/* First scan complete, reenable interrupts. */
			if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib))
				break;
			cpu = 0;
			continue;
		}
		cpu_data = &per_cpu(irq_data, cpu);
		if (atomic_inc_return(&cpu_data->scheduled) > 1)
			continue;	/* a CSD for this CPU is already pending */

		INIT_CSD(&cpu_data->csd, zpci_handle_remote_irq, &cpu_data->scheduled);
		smp_call_function_single_async(cpu, &cpu_data->csd);
	}
}
2022-06-06 16:33:09 -04:00
static void zpci_directed_irq_handler ( struct airq_struct * airq ,
struct tpi_info * tpi_info )
2018-09-27 13:57:12 +02:00
{
2022-06-06 16:33:09 -04:00
bool floating = ! tpi_info - > directed_irq ;
2018-11-22 14:08:33 +01:00
if ( floating ) {
inc_irq_stat ( IRQIO_PCF ) ;
2018-09-27 13:57:12 +02:00
zpci_handle_fallback_irq ( ) ;
2018-11-22 14:08:33 +01:00
} else {
inc_irq_stat ( IRQIO_PCD ) ;
2018-09-27 13:57:12 +02:00
zpci_handle_cpu_local_irq ( true ) ;
2018-11-22 14:08:33 +01:00
}
2018-09-27 13:57:12 +02:00
}
2022-06-06 16:33:09 -04:00
/*
 * Adapter interrupt entry point for floating delivery: scan the summary
 * vector (one bit per function), then for each pending function scan its
 * own interrupt vector and dispatch every pending bit. After the first
 * empty summary scan, re-enable delivery and scan once more to close the
 * race with interrupts raised in between.
 */
static void zpci_floating_irq_handler(struct airq_struct *airq,
				      struct tpi_info *tpi_info)
{
	union zpci_sic_iib iib = {{0}};
	unsigned long si, ai;
	struct airq_iv *aibv;
	int irqs_on = 0;

	inc_irq_stat(IRQIO_PCF);
	for (si = 0;;) {
		/* Scan adapter summary indicator bit vector */
		si = airq_iv_scan(zpci_sbv, si, airq_iv_end(zpci_sbv));
		if (si == -1UL) {
			if (irqs_on++)
				/* End of second scan with interrupts on. */
				break;
			/* First scan complete, reenable interrupts. */
			if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib))
				break;
			si = 0;
			continue;
		}

		/* Scan the adapter interrupt vector for this device. */
		aibv = zpci_ibv[si];
		for (ai = 0;;) {
			ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
			if (ai == -1UL)
				break;
			inc_irq_stat(IRQIO_MSI);
			/* Bit lock serializes against concurrent teardown. */
			airq_iv_lock(aibv, ai);
			generic_handle_irq(airq_iv_get_data(aibv, ai));
			airq_iv_unlock(aibv, ai);
		}
	}
}
/*
 * arch hook: allocate and wire up MSI/MSI-X interrupts for @pdev.
 *
 * FLOATING: allocate a summary bit and a private per-function interrupt
 * vector. DIRECTED: allocate a contiguous range of bits in the shared
 * per-CPU vectors. For each MSI descriptor an IRQ descriptor is
 * allocated, mapped to its vector bit, and the MSI message is written
 * with the device's MSI address (CPU address encoded in bits 8-23 of
 * address_lo for directed mode). Finally the registration is pushed to
 * the hardware via zpci_set_irq().
 *
 * Returns 0 on full success, the (smaller) number of vectors actually
 * set up if fewer than @nvec fit, 1 to ask the MSI core to retry with a
 * single vector for multi-MSI, or a negative errno on failure.
 *
 * NOTE(review): the early-error returns inside the loop do not roll back
 * previously allocated IRQ descriptors or the aibv/summary bit — TODO
 * confirm callers always follow up with arch_teardown_msi_irqs().
 */
int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	unsigned int hwirq, msi_vecs, cpu;
	unsigned long bit;
	struct msi_desc *msi;
	struct msi_msg msg;
	int cpu_addr;
	int rc, irq;

	zdev->aisb = -1UL;
	zdev->msi_first_bit = -1U;
	/* Multi-MSI is not supported; ask the core to retry with one. */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;
	msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);

	if (irq_delivery == DIRECTED) {
		/* Allocate cpu vector bits */
		bit = airq_iv_alloc(zpci_ibv[0], msi_vecs);
		if (bit == -1UL)
			return -EIO;
	} else {
		/* Allocate adapter summary indicator bit */
		bit = airq_iv_alloc_bit(zpci_sbv);
		if (bit == -1UL)
			return -EIO;
		zdev->aisb = bit;

		/* Create adapter interrupt vector */
		zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK, NULL);
		if (!zdev->aibv)
			return -ENOMEM;

		/* Wire up shortcut pointer */
		zpci_ibv[bit] = zdev->aibv;
		/* Each function has its own interrupt vector */
		bit = 0;
	}

	/* Request MSI interrupts */
	hwirq = bit;
	msi_for_each_desc(msi, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
		rc = -EIO;
		if (hwirq - bit >= msi_vecs)
			break;
		irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE,
					(irq_delivery == DIRECTED) ?
					msi->affinity : NULL);
		if (irq < 0)
			return -ENOMEM;
		rc = irq_set_msi_desc(irq, msi);
		if (rc)
			return rc;
		irq_set_chip_and_handler(irq, &zpci_irq_chip,
					 handle_percpu_irq);
		msg.data = hwirq - bit;	/* vector index relative to first bit */
		if (irq_delivery == DIRECTED) {
			if (msi->affinity)
				cpu = cpumask_first(&msi->affinity->mask);
			else
				cpu = 0;
			cpu_addr = smp_cpu_get_cpu_address(cpu);

			/* Target CPU address lives in bits 8-23 of address_lo. */
			msg.address_lo = zdev->msi_addr & 0xff0000ff;
			msg.address_lo |= (cpu_addr << 8);

			/* Same IRQ mapping in every CPU's vector so the MSI can
			 * be retargeted via zpci_set_irq_affinity(). */
			for_each_possible_cpu(cpu) {
				airq_iv_set_data(zpci_ibv[cpu], hwirq, irq);
			}
		} else {
			msg.address_lo = zdev->msi_addr & 0xffffffff;
			airq_iv_set_data(zdev->aibv, hwirq, irq);
		}
		msg.address_hi = zdev->msi_addr >> 32;
		pci_write_msi_msg(irq, &msg);
		hwirq++;
	}

	zdev->msi_first_bit = bit;
	zdev->msi_nr_irqs = msi_vecs;

	rc = zpci_set_irq(zdev);
	if (rc)
		return rc;

	return (msi_vecs == nvec) ? 0 : msi_vecs;
}
/*
 * arch hook: undo arch_setup_msi_irqs() for @pdev.
 *
 * First deregisters the interrupts at the hardware (bailing out if that
 * fails, since the device could still raise them), then releases the IRQ
 * descriptors, the per-function vector, the summary bit, and — in
 * directed mode — the bit range in the shared per-CPU vectors.
 */
void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct msi_desc *msi;
	int rc;

	/* Disable interrupts */
	rc = zpci_clear_irq(zdev);
	if (rc)
		return;

	/* Release MSI interrupts */
	msi_for_each_desc(msi, &pdev->dev, MSI_DESC_ASSOCIATED) {
		irq_set_msi_desc(msi->irq, NULL);
		irq_free_desc(msi->irq);
		msi->msg.address_lo = 0;
		msi->msg.address_hi = 0;
		msi->msg.data = 0;
		msi->irq = 0;
	}

	if (zdev->aisb != -1UL) {
		/* Drop the shortcut pointer before freeing the summary bit. */
		zpci_ibv[zdev->aisb] = NULL;
		airq_iv_free_bit(zpci_sbv, zdev->aisb);
		zdev->aisb = -1UL;
	}
	if (zdev->aibv) {
		airq_iv_release(zdev->aibv);
		zdev->aibv = NULL;
	}

	if ((irq_delivery == DIRECTED) && zdev->msi_first_bit != -1U)
		airq_iv_free(zpci_ibv[0], zdev->msi_first_bit, zdev->msi_nr_irqs);
}
2021-12-06 23:27:42 +01:00
bool arch_restore_msi_irqs ( struct pci_dev * pdev )
2021-07-01 15:49:11 +02:00
{
struct zpci_dev * zdev = to_zpci ( pdev ) ;
if ( ! zdev - > irqs_registered )
zpci_set_irq ( zdev ) ;
2021-12-06 23:27:42 +01:00
return true ;
2021-07-01 15:49:11 +02:00
}
2019-02-12 18:39:46 +01:00
/*
 * Adapter interrupt registration for the PCI ISC. The handler is
 * switched to zpci_directed_irq_handler in zpci_irq_init() when
 * directed delivery is selected.
 */
static struct airq_struct zpci_airq = {
	.handler = zpci_floating_irq_handler,
	.isc = PCI_ISC,
};
2018-09-27 13:57:12 +02:00
/*
 * Per-CPU init (run via on_each_cpu): register this CPU's directed
 * interrupt bit vector with the hardware and enable directed delivery
 * (single-interrupt mode) for the PCI ISC on this CPU.
 */
static void __init cpu_enable_directed_irq(void *unused)
{
	union zpci_sic_iib iib = {{0}};
	union zpci_sic_iib ziib = {{0}};

	iib.cdiib.dibv_addr = (u64) zpci_ibv[smp_processor_id()]->vector;

	zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
	zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &ziib);
}
/*
 * Set up CPU-directed delivery: create the per-CPU summary vector used
 * by the floating fallback path, register it with the hardware, create
 * one interrupt bit vector per possible CPU, enable directed delivery
 * on every CPU, and install the affinity callback.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 *
 * NOTE(review): on -ENOMEM the already-allocated zpci_sbv/zpci_ibv[*]
 * are not freed here — presumably acceptable since a failing
 * zpci_irq_init() aborts PCI bring-up at boot; confirm against callers.
 */
static int __init zpci_directed_irq_init(void)
{
	union zpci_sic_iib iib = {{0}};
	unsigned int cpu;

	zpci_sbv = airq_iv_create(num_possible_cpus(), 0, NULL);
	if (!zpci_sbv)
		return -ENOMEM;

	iib.diib.isc = PCI_ISC;
	iib.diib.nr_cpus = num_possible_cpus();
	iib.diib.disb_addr = virt_to_phys(zpci_sbv->vector);
	zpci_set_irq_ctrl(SIC_IRQ_MODE_DIRECT, 0, &iib);

	zpci_ibv = kcalloc(num_possible_cpus(), sizeof(*zpci_ibv),
			   GFP_KERNEL);
	if (!zpci_ibv)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		/*
		 * Per CPU IRQ vectors look the same but bit-allocation
		 * is only done on the first vector.
		 */
		zpci_ibv[cpu] = airq_iv_create(cache_line_size() * BITS_PER_BYTE,
					       AIRQ_IV_DATA |
					       AIRQ_IV_CACHELINE |
					       (!cpu ? AIRQ_IV_ALLOC : 0), NULL);
		if (!zpci_ibv[cpu])
			return -ENOMEM;
	}
	on_each_cpu(cpu_enable_directed_irq, NULL, 1);

	zpci_irq_chip.irq_set_affinity = zpci_set_irq_affinity;

	return 0;
}
/*
 * Set up floating delivery: allocate the per-function vector pointer
 * table and the shared summary bit vector.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int __init zpci_floating_irq_init(void)
{
	zpci_ibv = kcalloc(ZPCI_NR_DEVICES, sizeof(*zpci_ibv), GFP_KERNEL);
	if (!zpci_ibv)
		return -ENOMEM;

	zpci_sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC, NULL);
	if (zpci_sbv)
		return 0;

	kfree(zpci_ibv);
	return -ENOMEM;
}
/*
 * Boot-time IRQ setup: pick the delivery mode (directed if the SCLP
 * facility is present and not overridden by the command line), register
 * the adapter interrupt for the PCI ISC, run the mode-specific init,
 * and finally enable floating IRQ delivery (which is the fallback path
 * when directed delivery is in use).
 *
 * Returns 0 on success or a negative errno.
 */
int __init zpci_irq_init(void)
{
	union zpci_sic_iib iib = {{0}};
	int rc;

	irq_delivery = sclp.has_dirq ? DIRECTED : FLOATING;
	if (s390_pci_force_floating)
		irq_delivery = FLOATING;

	if (irq_delivery == DIRECTED)
		zpci_airq.handler = zpci_directed_irq_handler;

	rc = register_adapter_interrupt(&zpci_airq);
	if (rc)
		goto out;
	/* Set summary to 1 to be called every time for the ISC. */
	*zpci_airq.lsi_ptr = 1;

	switch (irq_delivery) {
	case FLOATING:
		rc = zpci_floating_irq_init();
		break;
	case DIRECTED:
		rc = zpci_directed_irq_init();
		break;
	}

	if (rc)
		goto out_airq;

	/*
	 * Enable floating IRQs (with suppression after one IRQ). When using
	 * directed IRQs this enables the fallback path.
	 */
	zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib);

	return 0;

out_airq:
	unregister_adapter_interrupt(&zpci_airq);
out:
	return rc;
}
/*
 * Tear down everything zpci_irq_init() set up: release the per-CPU
 * vectors (directed mode only), the vector pointer table, the summary
 * vector, and the adapter interrupt registration.
 */
void __init zpci_irq_exit(void)
{
	unsigned int cpu;

	if (irq_delivery == DIRECTED)
		for_each_possible_cpu(cpu)
			airq_iv_release(zpci_ibv[cpu]);

	kfree(zpci_ibv);

	if (zpci_sbv)
		airq_iv_release(zpci_sbv);

	unregister_adapter_interrupt(&zpci_airq);
}