// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */
2014-07-16 17:21:01 +02:00
# define KMSG_COMPONENT "zpci"
# define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
2012-11-29 12:50:30 +01:00
# include <linux/kernel.h>
# include <linux/slab.h>
# include <linux/err.h>
# include <linux/export.h>
# include <linux/delay.h>
# include <linux/seq_file.h>
2019-04-14 15:38:01 +02:00
# include <linux/jump_label.h>
2012-11-29 12:50:30 +01:00
# include <linux/pci.h>
2019-11-28 09:30:00 +01:00
# include <linux/printk.h>
2012-11-29 12:50:30 +01:00
2012-11-29 13:05:05 +01:00
# include <asm/isc.h>
# include <asm/airq.h>
2012-11-29 12:50:30 +01:00
# include <asm/facility.h>
# include <asm/pci_insn.h>
2012-11-29 12:55:21 +01:00
# include <asm/pci_clp.h>
2012-11-29 14:33:30 +01:00
# include <asm/pci_dma.h>
2012-11-29 12:50:30 +01:00
2020-03-23 10:45:43 +01:00
# include "pci_bus.h"
2020-08-17 10:29:23 +02:00
# include "pci_iov.h"
2020-03-23 10:45:43 +01:00
2012-11-29 12:50:30 +01:00
/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

/* bitmap of PCI domain numbers in use, protected by zpci_domain_lock */
static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);

/* number of iomap table slots: NR_DEVICES * BARS / 2, capped at the maximum */
#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

/* set via the "norid" command line parameter (see pcibios_setup()) */
unsigned int s390_pci_no_rid;

/* protects zpci_iomap_bitmap and zpci_iomap_start */
static DEFINE_SPINLOCK(zpci_iomap_lock);
/* allocation bitmap and table backing the fake iomap cookies (non-MIO mode) */
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

/* enabled in pci_base_init() when PCI MIO instructions are usable */
DEFINE_STATIC_KEY_FALSE(have_mio);

/* slab cache for function measurement blocks (need special alignment) */
static struct kmem_cache *zdev_fmb_cache;
2012-11-29 12:50:30 +01:00
/* Look up a zpci device by its function ID; returns NULL if none matches. */
struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *found = NULL;
	struct zpci_dev *iter;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(iter, &zpci_list, entry) {
		if (iter->fid == fid) {
			found = iter;
			break;
		}
	}
	spin_unlock(&zpci_list_lock);

	return found;
}
2017-06-20 15:56:05 +02:00
/*
 * Drop devices that we still track as STANDBY but that the platform has
 * meanwhile moved to the RESERVED state (queried via CLP): collect them
 * on a private list under the list lock, then release the references
 * outside the lock.
 */
void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	/* dropping the last reference frees the zdev */
	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_zdev_put(zdev);
}
/* Domain number of a PCI bus; on s390 each zbus carries its own domain. */
int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);
int pci_proc_domain ( struct pci_bus * bus )
{
return pci_domain_nr ( bus ) ;
}
EXPORT_SYMBOL_GPL ( pci_proc_domain ) ;
2012-11-29 14:33:30 +01:00
/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	/* the IOTA must not use the low 14 bits (region table origin) */
	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc)
		zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
	/* returns the raw condition code; 0 means success */
	return cc;
}
/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	/* an all-zero FIB is sufficient for deregistration */
	struct zpci_fib fib = {0};
	u8 cc, status;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc)
		zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
	return cc;
}
2012-12-11 14:53:35 +01:00
/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	/* refuse if already enabled or firmware reports a larger FMB than ours */
	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	/* the cache is created with FMB alignment; sanity-check 16-byte alignment */
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	atomic64_set(&zdev->allocated_pages, 0);
	atomic64_set(&zdev->mapped_pages, 0);
	atomic64_set(&zdev->unmapped_pages, 0);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		/* enabling failed: free the block so a retry starts clean */
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}
/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}
2012-11-29 12:50:30 +01:00
/*
 * Read up to 4 bytes from a function's config space; on failure *val is
 * set to all-ones, mirroring PCI master-abort semantics.
 */
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (rc) {
		*val = 0xffffffff;
		return rc;
	}

	/* convert from big-endian and right-align the len-byte value */
	data = le64_to_cpu((__force __le64) data);
	data >>= (8 - len) * 8;
	*val = (u32) data;
	return 0;
}
/* Write up to 4 bytes to a function's config space. */
static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;

	/* left-align the len-byte value and convert to big-endian */
	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);

	return __zpci_store(data, req, offset);
}
/* No architecture-specific resource alignment needed on s390. */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}
2012-12-06 14:30:28 +01:00
/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}
2020-07-13 14:12:49 +02:00
/*
 * Map a physical PCI address range into the vmalloc area with the given
 * protection. Without MIO support the "physical" address is already a
 * fake iomap cookie and is returned unchanged.
 */
static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
{
	unsigned long offset, vaddr;
	struct vm_struct *area;
	phys_addr_t last_addr;

	/* reject empty ranges and address-space wrap-around */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *) addr;

	/* page-align the mapping, remembering the sub-page offset */
	offset = addr & ~PAGE_MASK;
	addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
		free_vm_area(area);
		return NULL;
	}
	return (void __iomem *) ((unsigned long) area->addr + offset);
}
2020-07-13 14:12:49 +02:00
/* Map an I/O range with caller-supplied page protection bits. */
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	return __ioremap(addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);
/* Map an I/O range with the default kernel page protection. */
void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, PAGE_KERNEL);
}
EXPORT_SYMBOL(ioremap);
2020-07-13 14:12:49 +02:00
/* Map an I/O range with write-combining protection. */
void __iomem *ioremap_wc(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, pgprot_writecombine(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wc);
/* Map an I/O range with write-through protection. */
void __iomem *ioremap_wt(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, pgprot_writethrough(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wt);
2019-04-14 15:38:01 +02:00
/* Undo __ioremap(); only MIO mappings were actually vmapped. */
void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
2012-11-29 12:50:30 +01:00
/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	/* record fh/bar so the instruction layer can resolve the cookie */
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	/* the returned "address" encodes the iomap table index */
	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}
2019-04-14 15:38:01 +02:00
/* Map a BAR for MIO access using its write-through MIO address. */
static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}
/* Map part of a PCI BAR, dispatching on MIO availability. */
void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);

	return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);
2013-05-29 11:52:21 +09:30
/* Map the first maxlen bytes of a BAR (offset 0). */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);
2012-11-29 12:50:30 +01:00
2019-04-14 15:38:01 +02:00
/* Map a BAR for MIO access using its write-back MIO address. */
static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}
/* Write-combining variant of pci_iomap_range(). */
void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);

	return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);
/* Write-combining map of the first maxlen bytes of a BAR (offset 0). */
void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);
/* Release an iomap cookie; clears the slot when the last user is gone. */
static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}
2019-04-14 15:38:01 +02:00
/* MIO mappings were created via ioremap(), so iounmap() releases them. */
static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}
/* Unmap a BAR mapping, dispatching on MIO availability. */
void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio)) {
		pci_iounmap_mio(pdev, addr);
		return;
	}
	pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);
2012-11-29 12:50:30 +01:00
/* pci_ops read accessor: resolve the zdev and forward to zpci_cfg_load(). */
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);

	if (!zdev)
		return -ENODEV;

	return zpci_cfg_load(zdev, where, val, size);
}
/* pci_ops write accessor: resolve the zdev and forward to zpci_cfg_store(). */
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);

	if (!zdev)
		return -ENODEV;

	return zpci_cfg_store(zdev, where, val, size);
}
/* Config space accessors used for all zPCI root buses. */
static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
2015-02-27 16:43:21 +01:00
/*
 * Fill in the pdev's BAR resources: the MIO write-through address when
 * MIO is in use, otherwise a fake iomap cookie from the iomap table.
 */
static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wt;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

	zpci_iov_map_resources(pdev);
}
2015-02-27 16:43:21 +01:00
/*
 * Undo zpci_map_resources(). MIO resources need no unmapping; only the
 * fake iomap cookies from the non-MIO path must be released.
 */
static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}
2012-11-29 12:50:30 +01:00
/* Allocate a slot in the iomap table; returns its index or -ENOSPC. */
static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}
/* Clear and release an iomap table slot previously allocated. */
static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}
2013-11-12 19:33:06 +01:00
static struct resource * __alloc_res ( struct zpci_dev * zdev , unsigned long start ,
unsigned long size , unsigned long flags )
{
struct resource * r ;
r = kzalloc ( sizeof ( * r ) , GFP_KERNEL ) ;
if ( ! r )
return NULL ;
r - > start = start ;
r - > end = r - > start + size - 1 ;
r - > flags = flags ;
r - > name = zdev - > res_name ;
if ( request_resource ( & iomem_resource , r ) ) {
kfree ( r ) ;
return NULL ;
}
return r ;
}
2020-03-23 10:45:43 +01:00
/*
 * Create and register an iomem resource for every populated BAR and add
 * it to the bus resource list. On failure the just-allocated iomap slot
 * is freed; previously created resources are cleaned up by the caller.
 */
int zpci_setup_bus_resources(struct zpci_dev *zdev,
			     struct list_head *resources)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		/* BAR attribute bits: 8 = prefetchable, 4 = 64-bit */
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wt;
		else
			addr = ZPCI_ADDR(entry);
		/* bars[i].size is the log2 of the BAR size */
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
		pci_add_resource(resources, res);
	}
	zdev->has_resources = 1;

	return 0;
}
/* Release all BAR resources and their iomap slots. */
static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		/* skip BARs that were never set up */
		if (!zdev->bars[i].size || !zdev->bars[i].res)
			continue;

		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		release_resource(zdev->bars[i].res);
		kfree(zdev->bars[i].res);
	}
	zdev->has_resources = 0;
}
2013-04-16 14:13:21 +02:00
/* Arch hook: wire up a freshly created pci_dev to its zpci device. */
int pcibios_add_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct resource *res;
	int i;

	/* The pdev has a reference to the zdev via its bus */
	zpci_zdev_get(zdev);
	/* VFs are discovered via events, not by scanning the PF */
	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);

	/* claim all mapped BAR resources not yet in the resource tree */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}
2015-02-27 16:43:21 +01:00
/* Arch hook: undo pcibios_add_device() when the pci_dev goes away. */
void pcibios_release_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_unmap_resources(pdev);
	/* drop the reference taken in pcibios_add_device() */
	zpci_zdev_put(zdev);
}
2013-08-29 19:34:37 +02:00
/* Arch hook: set up debugging and measurement, then enable resources. */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	/* measurement is best-effort; failure is not fatal here */
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}
2013-08-29 19:34:37 +02:00
/* Arch hook: tear down measurement and debugging state. */
void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}
2020-03-23 10:45:43 +01:00
/* Claim a specific domain number; -EEXIST if it is already taken. */
static int __zpci_register_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	if (test_bit(domain, zpci_domain)) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Domain %04x is already assigned\n", domain);
		return -EEXIST;
	}
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}
2020-03-17 12:59:37 +01:00
2020-03-23 10:45:43 +01:00
/* Auto-allocate the lowest free domain number. */
static int __zpci_alloc_domain(void)
{
	int domain;

	spin_lock(&zpci_domain_lock);
	/*
	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
	 * There is either a free domain or we have reached the maximum in
	 * which case we would have bailed earlier.
	 */
	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}
2020-03-23 10:45:43 +01:00
/*
 * Allocate a domain number. With unique UIDs a non-zero UID is used
 * directly as the domain; a missing UID disables UID checking and falls
 * back to automatic allocation.
 */
int zpci_alloc_domain(int domain)
{
	if (zpci_unique_uid) {
		if (domain)
			return __zpci_register_domain(domain);
		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
		update_uid_checking(false);
	}
	return __zpci_alloc_domain();
}
2020-03-23 10:45:43 +01:00
/* Return a domain number to the pool. */
void zpci_free_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}
2012-11-29 12:55:21 +01:00
/*
 * Enable the PCI function via CLP. On success the function handle
 * changes, so zdev->fh is updated; on failure it is left untouched.
 */
int zpci_enable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc = 0;

	if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
		rc = -EIO;
	else
		zdev->fh = fh;
	return rc;
}
2013-04-16 14:12:17 +02:00
/*
 * Disable the PCI function via CLP and update the function handle.
 * If the platform reports the function as already disabled, refresh the
 * handle from CLP and return -EINVAL to flag the state mismatch.
 */
int zpci_disable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int cc, rc = 0;

	cc = clp_disable_fh(zdev, &fh);
	if (!cc) {
		zdev->fh = fh;
	} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
		pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
			zdev->fid);
		/* Function is already disabled - update handle */
		rc = clp_refresh_fh(zdev->fid, &fh);
		if (!rc) {
			zdev->fh = fh;
			rc = -EINVAL;
		}
	} else {
		rc = -EIO;
	}
	return rc;
}
2020-07-22 16:53:54 +02:00
/**
 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 * @fid: Function ID of the device to be created
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation either Standby or Configured
 *
 * Creates a new zpci device and adds it to its, possibly newly created, zbus
 * as well as zpci_list.
 *
 * Returns: the zdev on success or an error pointer otherwise
 */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* FID and Function Handle are the static/dynamic identifiers */
	zdev->fid = fid;
	zdev->fh = fh;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev);
	if (rc)
		goto error;
	zdev->state = state;

	kref_init(&zdev->kref);
	mutex_init(&zdev->lock);

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto error;

	rc = zpci_bus_device_register(zdev, &pci_root_ops);
	if (rc)
		goto error_destroy_iommu;

	/* make the device visible to the rest of the subsystem last */
	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	return zdev;

error_destroy_iommu:
	zpci_destroy_iommu(zdev);
error:
	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return ERR_PTR(rc);
}
2020-11-03 10:41:20 +01:00
/**
 * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
 * @zdev: The zpci_dev to be configured
 * @fh: The general function handle supplied by the platform
 *
 * Given a device in the configuration state Configured, enables, scans and
 * adds it to the common code PCI subsystem if possible. If the PCI device is
 * parked because we can not yet create a PCI bus because we have not seen
 * function 0, it is ignored but will be scanned once function 0 appears.
 * If any failure occurs, the zpci_dev is left disabled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
	int rc;

	zdev->fh = fh;
	/* the PCI function will be scanned once function 0 appears */
	if (!zdev->zbus->bus)
		return 0;

	/* For function 0 on a multi-function bus scan whole bus as we might
	 * have to pick up existing functions waiting for it to allow creating
	 * the PCI bus
	 */
	if (zdev->devfn == 0 && zdev->zbus->multifunction)
		rc = zpci_bus_scan_bus(zdev->zbus);
	else
		rc = zpci_bus_scan_device(zdev);

	return rc;
}
/**
 * zpci_deconfigure_device() - Deconfigure a zpci_dev
 * @zdev: The zpci_dev to configure
 *
 * Deconfigure a zPCI function that is currently configured and possibly known
 * to the common code PCI subsystem.
 * If any failure occurs the device is left as is.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_deconfigure_device(struct zpci_dev *zdev)
{
	int rc;

	/* remove it from the common PCI view first, if it was visible */
	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev->dma_table) {
		rc = zpci_dma_exit_device(zdev);
		if (rc)
			return rc;
	}
	if (zdev_enabled(zdev)) {
		rc = zpci_disable_device(zdev);
		if (rc)
			return rc;
	}

	rc = sclp_pci_deconfigure(zdev->fid);
	zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
	if (rc)
		return rc;
	zdev->state = ZPCI_FN_STATE_STANDBY;

	return 0;
}
2020-03-23 10:45:43 +01:00
/*
 * Final teardown when the last reference to a zpci_dev is dropped:
 * remove it from the PCI view, release DMA and the enable state, then
 * unwind per-state resources. The switch deliberately falls through:
 * a CONFIGURED device is first deconfigured, then cleaned up like a
 * STANDBY one.
 */
void zpci_release_device(struct kref *kref)
{
	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
	int ret;

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev->dma_table)
		zpci_dma_exit_device(zdev);
	if (zdev_enabled(zdev))
		zpci_disable_device(zdev);

	switch (zdev->state) {
	case ZPCI_FN_STATE_CONFIGURED:
		ret = sclp_pci_deconfigure(zdev->fid);
		zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
		fallthrough;
	case ZPCI_FN_STATE_STANDBY:
		if (zdev->has_hp_slot)
			zpci_exit_slot(zdev);
		if (zdev->has_resources)
			zpci_cleanup_bus_resources(zdev);
		zpci_bus_device_unregister(zdev);
		zpci_destroy_iommu(zdev);
		fallthrough;
	default:
		break;
	}

	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);
	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree(zdev);
}
2016-07-18 14:05:21 +02:00
/* Forward an adapter error report for this function to the SCLP. */
int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);
2012-11-29 12:50:30 +01:00
/*
 * Allocate the FMB slab cache, the iomap table and its allocation
 * bitmap; with MIO available, also set up write-back MIO via CLP.
 * Unwinds in reverse order on failure.
 */
static int zpci_mem_init(void)
{
	/* the FMB cache relies on the struct's alignment covering its size */
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	if (static_branch_likely(&have_mio))
		clp_setup_writeback_mio();

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}
/* Free everything allocated by zpci_mem_init(). */
static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}
2019-02-12 16:23:13 +01:00
/* cleared by the "pci=off" command line parameter to skip PCI init */
static unsigned int s390_pci_probe __initdata = 1;
/* set by "force_floating" — NOTE(review): presumably forces floating
 * (as opposed to directed) interrupts; confirm in the IRQ setup code */
unsigned int s390_pci_force_floating __initdata;
/* set once pci_base_init() completed successfully */
static unsigned int s390_pci_initialized;
2012-11-29 12:50:30 +01:00
/*
 * Parse the "pci=" command line options understood by this arch:
 * "off" disables PCI, "nomio" clears the MIO machine flag,
 * "force_floating" and "norid" set the corresponding globals.
 * Unrecognized options are handed back to the caller.
 */
char *__init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	if (!strcmp(str, "norid")) {
		s390_pci_no_rid = 1;
		return NULL;
	}
	return str;
}
2013-12-12 17:48:32 +01:00
/* True once PCI base initialization succeeded. */
bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}
2012-11-29 12:50:30 +01:00
/*
 * Subsystem init: check that PCI is enabled and the required CPU
 * facilities (69, 71) exist, enable MIO if available, then bring up
 * debugging, memory pools, IRQs and DMA before scanning for devices.
 * Each step unwinds the previous ones on failure.
 */
static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71)) {
		pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
		return 0;
	}

	if (MACHINE_HAS_PCI_MIO) {
		static_branch_enable(&have_mio);
		/* control register bit 2.5 enables the MIO instructions */
		ctl_set_bit(2, 5);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;
	zpci_bus_scan_busses();

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);