// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */
2014-07-16 17:21:01 +02:00
# define KMSG_COMPONENT "zpci"
# define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
2012-11-29 12:50:30 +01:00
# include <linux/kernel.h>
# include <linux/slab.h>
# include <linux/err.h>
# include <linux/export.h>
# include <linux/delay.h>
# include <linux/seq_file.h>
2019-04-14 15:38:01 +02:00
# include <linux/jump_label.h>
2012-11-29 12:50:30 +01:00
# include <linux/pci.h>
2019-11-28 09:30:00 +01:00
# include <linux/printk.h>
2012-11-29 12:50:30 +01:00
2012-11-29 13:05:05 +01:00
# include <asm/isc.h>
# include <asm/airq.h>
2012-11-29 12:50:30 +01:00
# include <asm/facility.h>
# include <asm/pci_insn.h>
2012-11-29 12:55:21 +01:00
# include <asm/pci_clp.h>
2012-11-29 14:33:30 +01:00
# include <asm/pci_dma.h>
2012-11-29 12:50:30 +01:00
/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
/* protects zpci_list against concurrent insertion/removal */
static DEFINE_SPINLOCK(zpci_list_lock);

/* bitmap of PCI domain numbers in use (one domain per function) */
static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);
/*
 * Size of the iomap table: on average half an entry per BAR,
 * capped at the architectural maximum number of entries.
 */
#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_BAR_COUNT / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

/* protects zpci_iomap_start and zpci_iomap_bitmap */
static DEFINE_SPINLOCK(zpci_iomap_lock);
/* tracks which iomap table entries are in use */
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

/* enabled when the machine provides PCI MIO support */
DEFINE_STATIC_KEY_FALSE(have_mio);

/* slab cache for function measurement blocks */
static struct kmem_cache *zdev_fmb_cache;
/* Look up the zpci device with the given function ID; NULL if unknown. */
struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *found = NULL, *iter;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(iter, &zpci_list, entry) {
		if (iter->fid == fid) {
			found = iter;
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return found;
}
2017-06-20 15:56:05 +02:00
void zpci_remove_reserved_devices ( void )
{
struct zpci_dev * tmp , * zdev ;
enum zpci_state state ;
LIST_HEAD ( remove ) ;
spin_lock ( & zpci_list_lock ) ;
list_for_each_entry_safe ( zdev , tmp , & zpci_list , entry ) {
if ( zdev - > state = = ZPCI_FN_STATE_STANDBY & &
! clp_get_state ( zdev - > fid , & state ) & &
state = = ZPCI_FN_STATE_RESERVED )
list_move_tail ( & zdev - > entry , & remove ) ;
}
spin_unlock ( & zpci_list_lock ) ;
list_for_each_entry_safe ( zdev , tmp , & remove , entry )
zpci_remove_device ( zdev ) ;
}
2012-11-29 12:50:30 +01:00
static struct zpci_dev * get_zdev_by_bus ( struct pci_bus * bus )
{
return ( bus & & bus - > sysdata ) ? ( struct zpci_dev * ) bus - > sysdata : NULL ;
}
int pci_domain_nr ( struct pci_bus * bus )
{
return ( ( struct zpci_dev * ) bus - > sysdata ) - > domain ;
}
EXPORT_SYMBOL_GPL ( pci_domain_nr ) ;
/* /proc/bus/pci uses the same numbering as pci_domain_nr(). */
int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);
/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 status;

	/* the low 14 bits of the IOTA must be clear; they carry flags */
	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}
/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	struct zpci_fib fib = {0};
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	u8 cc, status;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3)	/* Function already gone. */
		return 0;
	return cc ? -EIO : 0;
}
/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	/* refuse if measurement is already on or the FMB layout is too small */
	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	/* the FMB must be 16-byte aligned */
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	atomic64_set(&zdev->allocated_pages, 0);
	atomic64_set(&zdev->mapped_pages, 0);
	atomic64_set(&zdev->unmapped_pages, 0);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		/* enabling failed: free the FMB again */
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}
/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}
/* Read up to 4 bytes from the config space of a function. */
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		/* adjust byte order and right-align the len-byte value */
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		/* config read failed: report all-ones like a master abort */
		*val = 0xffffffff;
	return rc;
}
/* Write up to 4 bytes to the config space of a function. */
static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	/* left-align the len-byte value and adjust byte order */
	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}
/* zpci imposes no additional resource alignment constraints. */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}
/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}
/*
 * Map an MIO address range into the kernel virtual address space.
 * Without MIO support the address is directly usable and returned as-is.
 */
void __iomem *ioremap(unsigned long ioaddr, unsigned long size)
{
	struct vm_struct *area;
	unsigned long offset;

	if (!size)
		return NULL;
	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *) ioaddr;

	/* page-align the mapping, remembering the sub-page offset */
	offset = ioaddr & ~PAGE_MASK;
	ioaddr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	if (ioremap_page_range((unsigned long) area->addr,
			       (unsigned long) area->addr + size,
			       ioaddr, PAGE_KERNEL)) {
		vunmap(area->addr);
		return NULL;
	}
	return (void __iomem *) ((unsigned long) area->addr + offset);
}
EXPORT_SYMBOL(ioremap);
/* Undo an ioremap(); nothing to do when MIO is not in use. */
void iounmap(volatile void __iomem *addr)
{
	if (!static_branch_likely(&have_mio))
		return;
	vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	/* the cookie encodes the table index; no real mapping is created */
	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}
/* Map a BAR through its write-through MIO address. */
static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt,
		       pci_resource_len(pdev, bar));
	if (!iova)
		return NULL;
	return iova + offset;
}
/* Create a mapping cookie for a BAR, via MIO or the classic iomap table. */
void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_BAR_COUNT || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);
/* Map a whole BAR starting at offset zero. */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);
/* Map a BAR through its write-back MIO address (write-combining variant). */
static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb,
		       pci_resource_len(pdev, bar));
	if (!iova)
		return NULL;
	return iova + offset;
}
/* Write-combining variant of pci_iomap_range(). */
void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_BAR_COUNT || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);
/* Write-combining map of a whole BAR starting at offset zero. */
void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);
/* Drop one reference on an iomap table entry handed out by
 * pci_iomap_range_fh(); the last unmap invalidates the slot.
 */
static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}
/* MIO mappings are ordinary ioremap()ed ranges. */
static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}
/* Release a BAR mapping, dispatching on the MIO capability. */
void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);
/* Bus-ops read callback: config space read for the bus's zpci function. */
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	/* only a single function per bus exists */
	if (!zdev || devfn != ZPCI_DEVFN)
		return -ENODEV;
	return zpci_cfg_load(zdev, where, val, size);
}
/* Bus-ops write callback: config space write for the bus's zpci function. */
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	/* only a single function per bus exists */
	if (!zdev || devfn != ZPCI_DEVFN)
		return -ENODEV;
	return zpci_cfg_store(zdev, where, val, size);
}
/* Config space accessors used for the zpci root buses. */
static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
#ifdef CONFIG_PCI_IOV
/* artificial parent resource covering all SR-IOV BAR space */
static struct resource iov_res = {
	.name	= "PCI IOV res",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
#endif
/* Fill in resource start/end addresses for each implemented BAR;
 * without MIO this also creates the classic iomap cookies.
 */
static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wb;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

#ifdef CONFIG_PCI_IOV
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		int bar = i + PCI_IOV_RESOURCES;

		len = pci_resource_len(pdev, bar);
		if (!len)
			continue;
		/* VF BARs are parented by the artificial IOV resource */
		pdev->resource[bar].parent = &iov_res;
	}
#endif
}
static void zpci_unmap_resources ( struct pci_dev * pdev )
2013-06-05 16:06:16 +02:00
{
2019-06-27 15:13:05 +02:00
struct zpci_dev * zdev = to_zpci ( pdev ) ;
2013-06-05 16:06:16 +02:00
resource_size_t len ;
int i ;
2019-06-27 15:13:05 +02:00
if ( zpci_use_mio ( zdev ) )
2019-04-14 15:38:01 +02:00
return ;
2013-06-05 16:06:16 +02:00
for ( i = 0 ; i < PCI_BAR_COUNT ; i + + ) {
len = pci_resource_len ( pdev , i ) ;
if ( ! len )
continue ;
2019-06-27 15:13:05 +02:00
pci_iounmap_fh ( pdev , ( void __iomem __force * )
pdev - > resource [ i ] . start ) ;
2013-06-05 16:06:16 +02:00
}
}
2012-11-29 12:50:30 +01:00
static int zpci_alloc_iomap ( struct zpci_dev * zdev )
{
2016-01-22 13:59:35 +01:00
unsigned long entry ;
2012-11-29 12:50:30 +01:00
spin_lock ( & zpci_iomap_lock ) ;
2016-01-22 14:01:44 +01:00
entry = find_first_zero_bit ( zpci_iomap_bitmap , ZPCI_IOMAP_ENTRIES ) ;
if ( entry = = ZPCI_IOMAP_ENTRIES ) {
2012-11-29 12:50:30 +01:00
spin_unlock ( & zpci_iomap_lock ) ;
return - ENOSPC ;
}
2016-01-22 14:01:44 +01:00
set_bit ( entry , zpci_iomap_bitmap ) ;
2012-11-29 12:50:30 +01:00
spin_unlock ( & zpci_iomap_lock ) ;
return entry ;
}
static void zpci_free_iomap ( struct zpci_dev * zdev , int entry )
{
spin_lock ( & zpci_iomap_lock ) ;
memset ( & zpci_iomap_start [ entry ] , 0 , sizeof ( struct zpci_iomap_entry ) ) ;
2016-01-22 14:01:44 +01:00
clear_bit ( entry , zpci_iomap_bitmap ) ;
2012-11-29 12:50:30 +01:00
spin_unlock ( & zpci_iomap_lock ) ;
}
2013-11-12 19:33:06 +01:00
static struct resource * __alloc_res ( struct zpci_dev * zdev , unsigned long start ,
unsigned long size , unsigned long flags )
{
struct resource * r ;
r = kzalloc ( sizeof ( * r ) , GFP_KERNEL ) ;
if ( ! r )
return NULL ;
r - > start = start ;
r - > end = r - > start + size - 1 ;
r - > flags = flags ;
r - > name = zdev - > res_name ;
if ( request_resource ( & iomem_resource , r ) ) {
kfree ( r ) ;
return NULL ;
}
return r ;
}
/* Create bus resources for each implemented BAR and collect them on the
 * list used for the root bus scan.
 */
static int zpci_setup_bus_resources(struct zpci_dev *zdev,
				    struct list_head *resources)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wb;
		else
			addr = ZPCI_ADDR(entry);
		/* bars[i].size is the log2 of the BAR size */
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
		pci_add_resource(resources, res);
	}
	return 0;
}
static void zpci_cleanup_bus_resources ( struct zpci_dev * zdev )
{
int i ;
for ( i = 0 ; i < PCI_BAR_COUNT ; i + + ) {
2015-07-28 19:10:45 +02:00
if ( ! zdev - > bars [ i ] . size | | ! zdev - > bars [ i ] . res )
2013-11-12 19:33:06 +01:00
continue ;
zpci_free_iomap ( zdev , zdev - > bars [ i ] . map_idx ) ;
release_resource ( zdev - > bars [ i ] . res ) ;
kfree ( zdev - > bars [ i ] . res ) ;
}
}
2013-04-16 14:13:21 +02:00
/* Arch hook: set up sysfs attributes, DMA ops and BAR resources for a
 * newly added PCI device.
 */
int pcibios_add_device(struct pci_dev *pdev)
{
	struct resource *res;
	int i;

	/* SR-IOV VFs are not scanned by the PF on this platform */
	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}
	return 0;
}
/* Arch hook: release per-device mappings when the device goes away. */
void pcibios_release_device(struct pci_dev *pdev)
{
	zpci_unmap_resources(pdev);
}
int pcibios_enable_device ( struct pci_dev * pdev , int mask )
{
2015-06-23 14:06:35 +02:00
struct zpci_dev * zdev = to_zpci ( pdev ) ;
2013-04-16 14:13:21 +02:00
2016-01-29 15:13:30 +01:00
zpci_debug_init_device ( zdev , dev_name ( & pdev - > dev ) ) ;
2013-04-16 14:13:21 +02:00
zpci_fmb_enable_device ( zdev ) ;
2014-02-26 15:30:24 -07:00
return pci_enable_resources ( pdev , mask ) ;
2013-04-16 14:13:21 +02:00
}
2013-08-29 19:34:37 +02:00
/* Arch hook: stop measurement and debugging for a device. */
void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}
#ifdef CONFIG_HIBERNATE_CALLBACKS
/* Re-enable an online function and restore its resources and DMA
 * translation after hibernation.
 */
static int zpci_restore(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct zpci_dev *zdev = to_zpci(pdev);
	int ret = 0;

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		goto out;

	ret = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (ret)
		goto out;

	zpci_map_resources(pdev);
	zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
			   (u64) zdev->dma_table);

out:
	return ret;
}

/* Quiesce an online function before hibernation: drop the IOAT
 * registration, unmap resources and disable the function.
 */
static int zpci_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct zpci_dev *zdev = to_zpci(pdev);

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		return 0;

	zpci_unregister_ioat(zdev, 0);
	zpci_unmap_resources(pdev);
	return clp_disable_fh(zdev);
}

struct dev_pm_ops pcibios_pm_ops = {
	.thaw_noirq = zpci_restore,
	.freeze_noirq = zpci_freeze,
	.restore_noirq = zpci_restore,
	.poweroff_noirq = zpci_freeze,
};
#endif /* CONFIG_HIBERNATE_CALLBACKS */
/* Assign a PCI domain number: use the firmware UID when UIDs are unique,
 * otherwise take the first free bit from the domain bitmap.
 */
static int zpci_alloc_domain(struct zpci_dev *zdev)
{
	if (zpci_unique_uid) {
		zdev->domain = (u16) zdev->uid;
		/* domains beyond the bitmap are accepted untracked */
		if (zdev->domain >= ZPCI_NR_DEVICES)
			return 0;

		spin_lock(&zpci_domain_lock);
		if (test_bit(zdev->domain, zpci_domain)) {
			spin_unlock(&zpci_domain_lock);
			pr_err("Adding PCI function %08x failed because domain %04x is already assigned\n",
				zdev->fid, zdev->domain);
			return -EEXIST;
		}
		set_bit(zdev->domain, zpci_domain);
		spin_unlock(&zpci_domain_lock);
		return 0;
	}

	spin_lock(&zpci_domain_lock);
	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	if (zdev->domain == ZPCI_NR_DEVICES) {
		spin_unlock(&zpci_domain_lock);
		return -ENOSPC;
	}
	set_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return 0;
}
static void zpci_free_domain ( struct zpci_dev * zdev )
{
2017-06-21 10:20:35 +02:00
if ( zdev - > domain > = ZPCI_NR_DEVICES )
2016-01-27 13:33:30 +01:00
return ;
2012-11-29 12:50:30 +01:00
spin_lock ( & zpci_domain_lock ) ;
clear_bit ( zdev - > domain , zpci_domain ) ;
spin_unlock ( & zpci_domain_lock ) ;
}
2013-11-12 19:35:01 +01:00
/* Arch hook: tear down everything backing a removed root bus and free
 * the zpci device itself.
 */
void pcibios_remove_bus(struct pci_bus *bus)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	zpci_exit_slot(zdev);
	zpci_cleanup_bus_resources(zdev);
	zpci_destroy_iommu(zdev);
	zpci_free_domain(zdev);

	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);

	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree(zdev);
}
/* Create, scan and populate the root bus of a function; resources are
 * assigned before pci_bus_add_devices() lets drivers claim devices.
 */
static int zpci_scan_bus(struct zpci_dev *zdev)
{
	LIST_HEAD(resources);
	int ret;

	ret = zpci_setup_bus_resources(zdev, &resources);
	if (ret)
		goto error;

	zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
				      zdev, &resources);
	if (!zdev->bus) {
		ret = -EIO;
		goto error;
	}
	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	pci_bus_add_devices(zdev->bus);
	return 0;

error:
	zpci_cleanup_bus_resources(zdev);
	pci_free_resource_list(&resources);
	return ret;
}
/* Enable a function at the firmware and set up its DMA translation;
 * on success the function is marked online.
 */
int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;

	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);
/* Disable a function: tear down DMA first, then disable at the firmware. */
int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);
/* Register a newly discovered function: allocate a domain, set up the
 * IOMMU, enable it if already configured, scan its bus and publish it.
 */
int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto out_free;

	mutex_init(&zdev->lock);
	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto out_destroy_iommu;
	}
	rc = zpci_scan_bus(zdev);
	if (rc)
		goto out_disable;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	zpci_init_slot(zdev);
	return 0;

out_disable:
	/* only disable what zpci_enable_device() actually enabled */
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);
out_destroy_iommu:
	zpci_destroy_iommu(zdev);
out_free:
	zpci_free_domain(zdev);
out:
	return rc;
}
void zpci_remove_device ( struct zpci_dev * zdev )
{
if ( ! zdev - > bus )
return ;
pci_stop_root_bus ( zdev - > bus ) ;
pci_remove_root_bus ( zdev - > bus ) ;
}
2016-07-18 14:05:21 +02:00
int zpci_report_error ( struct pci_dev * pdev ,
struct zpci_report_error_header * report )
{
struct zpci_dev * zdev = to_zpci ( pdev ) ;
return sclp_pci_report ( report , zdev - > fh , zdev - > fid ) ;
}
EXPORT_SYMBOL ( zpci_report_error ) ;
2012-11-29 12:50:30 +01:00
/* Allocate the FMB cache, the iomap table and its allocation bitmap. */
static int zpci_mem_init(void)
{
	/* the FMB alignment must be a power of two covering its size */
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	return 0;

error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}
static void zpci_mem_exit ( void )
{
2016-01-22 14:01:44 +01:00
kfree ( zpci_iomap_bitmap ) ;
2012-11-29 12:50:30 +01:00
kfree ( zpci_iomap_start ) ;
2012-12-11 14:53:35 +01:00
kmem_cache_destroy ( zdev_fmb_cache ) ;
2012-11-29 12:50:30 +01:00
}
2019-02-12 16:23:13 +01:00
/* command line switches, parsed in pcibios_setup() */
static unsigned int s390_pci_probe __initdata = 1;
static unsigned int s390_pci_no_mio __initdata;
unsigned int s390_pci_force_floating __initdata;
/* set once pci_base_init() completed successfully */
static unsigned int s390_pci_initialized;
/* Parse the pci= early parameters; returns NULL for handled options,
 * the unchanged string otherwise.
 */
char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		s390_pci_no_mio = 1;
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	return str;
}
/* Tell whether PCI support finished initializing successfully. */
bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}
/* Initialize the PCI subsystem: check facilities, enable MIO if possible,
 * then bring up debug, memory, IRQ and DMA support and scan for devices.
 */
static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	/* bail out silently when the required facilities are missing */
	if (!test_facility(69) || !test_facility(71))
		return 0;

	/* use MIO instructions when available and not disabled on the
	 * command line */
	if (test_facility(153) && !s390_pci_no_mio) {
		static_branch_enable(&have_mio);
		ctl_set_bit(2, 5);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);
/* Trigger a rescan of the available PCI functions, if PCI is enabled. */
void zpci_rescan(void)
{
	if (zpci_is_enabled())
		clp_rescan_pci_devices_simple();
}