2005-04-17 02:20:36 +04:00
/*
 * probe.c - PCI detection and setup code
 */
# include <linux/kernel.h>
# include <linux/delay.h>
# include <linux/init.h>
# include <linux/pci.h>
2015-03-03 20:52:13 +03:00
# include <linux/of_pci.h>
2014-09-13 06:02:00 +04:00
# include <linux/pci_hotplug.h>
2005-04-17 02:20:36 +04:00
# include <linux/slab.h>
# include <linux/module.h>
# include <linux/cpumask.h>
PCI: add PCI Express ASPM support
PCI Express ASPM defines a protocol for PCI Express components in the D0
state to reduce Link power by placing their Links into a low power state
and instructing the other end of the Link to do likewise. This
capability allows hardware-autonomous, dynamic Link power reduction
beyond what is achievable by software-only controlled power management.
However, The device should be configured by software appropriately.
Enabling ASPM will save power, but will introduce device latency.
This patch adds ASPM support in Linux. It introduces a global policy for
ASPM, a sysfs file /sys/module/pcie_aspm/parameters/policy can control
it. The interface can be used as a boot option too. Currently we have
below setting:
-default, BIOS default setting
-powersave, highest power saving mode, enable all available ASPM
state and clock power management
-performance, highest performance, disable ASPM and clock power
management
By default, the 'default' policy is used currently.
In my test, power difference between powersave mode and performance mode
is about 1.3w in a system with 3 PCIE links.
Note: some devices might not work well with aspm, either because chipset
issue or device issue. The patch provide API (pci_disable_link_state),
driver can disable ASPM for specific device.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2008-02-25 04:46:41 +03:00
# include <linux/pci-aspm.h>
2012-05-01 01:21:02 +04:00
# include <asm-generic/pci-bridge.h>
2005-04-08 09:53:31 +04:00
# include "pci.h"
2005-04-17 02:20:36 +04:00
# define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
# define CARDBUS_RESERVE_BUSNR 3
2014-01-11 04:14:48 +04:00
static struct resource busn_resource = {
2012-05-18 05:51:12 +04:00
. name = " PCI busn " ,
. start = 0 ,
. end = 255 ,
. flags = IORESOURCE_BUS ,
} ;
2005-04-17 02:20:36 +04:00
/* Ugh. Need to stop exporting this to modules. */
LIST_HEAD ( pci_root_buses ) ;
EXPORT_SYMBOL ( pci_root_buses ) ;
2012-05-18 05:51:11 +04:00
/* List of per-domain bus number resources; see get_pci_domain_busn_res(). */
static LIST_HEAD(pci_domain_busn_res_list);

/* Bus number resource for one PCI domain, linked on pci_domain_busn_res_list. */
struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};
static struct resource * get_pci_domain_busn_res ( int domain_nr )
{
struct pci_domain_busn_res * r ;
list_for_each_entry ( r , & pci_domain_busn_res_list , list )
if ( r - > domain_nr = = domain_nr )
return & r - > res ;
r = kzalloc ( sizeof ( * r ) , GFP_KERNEL ) ;
if ( ! r )
return NULL ;
r - > domain_nr = domain_nr ;
r - > res . start = 0 ;
r - > res . end = 0xff ;
r - > res . flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED ;
list_add_tail ( & r - > list , & pci_domain_busn_res_list ) ;
return & r - > res ;
}
2008-02-14 09:30:39 +03:00
/* bus_find_device() match callback that accepts every device. */
static int find_anything(struct device *dev, void *data)
{
	return 1;
}
2005-04-17 02:20:36 +04:00
2007-07-16 10:39:39 +04:00
/*
* Some device drivers need know if pci is initiated .
* Basically , we think pci is not initiated when there
2008-02-14 09:30:39 +03:00
* is no device to be found on the pci_bus_type .
2007-07-16 10:39:39 +04:00
*/
int no_pci_devices ( void )
{
2008-02-14 09:30:39 +03:00
struct device * dev ;
int no_devices ;
2007-07-16 10:39:39 +04:00
2008-02-14 09:30:39 +03:00
dev = bus_find_device ( & pci_bus_type , NULL , NULL , find_anything ) ;
no_devices = ( dev = = NULL ) ;
put_device ( dev ) ;
return no_devices ;
}
2007-07-16 10:39:39 +04:00
EXPORT_SYMBOL ( no_pci_devices ) ;
2005-04-17 02:20:36 +04:00
/*
* PCI Bus Class
*/
2007-05-23 06:47:54 +04:00
static void release_pcibus_dev ( struct device * dev )
2005-04-17 02:20:36 +04:00
{
2007-05-23 06:47:54 +04:00
struct pci_bus * pci_bus = to_pci_bus ( dev ) ;
2005-04-17 02:20:36 +04:00
2014-11-11 07:02:17 +03:00
put_device ( pci_bus - > bridge ) ;
2010-02-23 20:24:36 +03:00
pci_bus_remove_resources ( pci_bus ) ;
2011-04-11 05:37:07 +04:00
pci_release_bus_of_node ( pci_bus ) ;
2005-04-17 02:20:36 +04:00
kfree ( pci_bus ) ;
}
static struct class pcibus_class = {
. name = " pci_bus " ,
2007-05-23 06:47:54 +04:00
. dev_release = & release_pcibus_dev ,
2013-07-25 02:05:17 +04:00
. dev_groups = pcibus_groups ,
2005-04-17 02:20:36 +04:00
} ;
static int __init pcibus_class_init ( void )
{
return class_register ( & pcibus_class ) ;
}
postcore_initcall ( pcibus_class_init ) ;
2008-07-28 21:38:59 +04:00
/*
 * Decode the size of a BAR from a sizing probe.
 *
 * @base:    value originally read from the BAR
 * @maxbase: value read back after writing all 1s to the BAR
 * @mask:    address bits valid for this BAR type
 *
 * Returns the region extent (size - 1) encoded by the writable address
 * bits, or 0 if the BAR is unimplemented or decodes inconsistently.
 */
static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */

	if (!size)
		return 0;

	/*
	 * Get the lowest of them to find the decode size, and
	 * from that the extent.
	 */
	size = (size & ~(size - 1)) - 1;

	/*
	 * base == maxbase can be valid only if the BAR has
	 * already been programmed with all 1s.
	 */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
2011-06-14 23:04:35 +04:00
/*
 * Translate the low (type) bits of a raw BAR value into IORESOURCE_*
 * flags.  I/O BARs get IORESOURCE_IO; memory BARs get IORESOURCE_MEM,
 * plus IORESOURCE_PREFETCH and/or IORESOURCE_MEM_64 as encoded.  The
 * 1M and reserved memory types are treated as plain 32-bit BARs.
 */
static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}
2013-08-23 02:19:18 +04:00
# define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
2008-11-21 21:40:40 +03:00
/**
* pci_read_base - read a PCI BAR
* @ dev : the PCI device
* @ type : type of the BAR
* @ res : resource buffer to be filled in
* @ pos : BAR position in the config space
*
* Returns 1 if the BAR is 64 - bit , or 0 if 32 - bit .
2008-07-28 21:38:59 +04:00
*/
2008-11-21 21:40:40 +03:00
int __pci_read_base ( struct pci_dev * dev , enum pci_bar_type type ,
2014-04-19 04:13:49 +04:00
struct resource * res , unsigned int pos )
2006-11-30 00:53:10 +03:00
{
2008-07-28 21:38:59 +04:00
u32 l , sz , mask ;
2014-04-15 01:25:54 +04:00
u64 l64 , sz64 , mask64 ;
2010-07-16 21:19:22 +04:00
u16 orig_cmd ;
2013-05-25 15:36:27 +04:00
struct pci_bus_region region , inverted_region ;
2008-07-28 21:38:59 +04:00
2009-10-29 18:24:59 +03:00
mask = type ? PCI_ROM_ADDRESS_MASK : ~ 0 ;
2008-07-28 21:38:59 +04:00
2012-08-23 20:53:08 +04:00
/* No printks while decoding is disabled! */
2010-07-16 21:19:22 +04:00
if ( ! dev - > mmio_always_on ) {
pci_read_config_word ( dev , PCI_COMMAND , & orig_cmd ) ;
2013-08-23 02:19:18 +04:00
if ( orig_cmd & PCI_COMMAND_DECODE_ENABLE ) {
pci_write_config_word ( dev , PCI_COMMAND ,
orig_cmd & ~ PCI_COMMAND_DECODE_ENABLE ) ;
}
2010-07-16 21:19:22 +04:00
}
2008-07-28 21:38:59 +04:00
res - > name = pci_name ( dev ) ;
pci_read_config_dword ( dev , pos , & l ) ;
2009-10-29 18:24:59 +03:00
pci_write_config_dword ( dev , pos , l | mask ) ;
2008-07-28 21:38:59 +04:00
pci_read_config_dword ( dev , pos , & sz ) ;
pci_write_config_dword ( dev , pos , l ) ;
/*
* All bits set in sz means the device isn ' t working properly .
2010-04-22 19:02:43 +04:00
* If the BAR isn ' t implemented , all bits must be 0. If it ' s a
* memory BAR or a ROM , bit 0 must be clear ; if it ' s an io BAR , bit
* 1 must be clear .
2008-07-28 21:38:59 +04:00
*/
2014-10-30 20:54:43 +03:00
if ( sz = = 0xffffffff )
sz = 0 ;
2008-07-28 21:38:59 +04:00
/*
* I don ' t know how l can have all bits set . Copied from old code .
* Maybe it fixes a bug on some ancient platform .
*/
if ( l = = 0xffffffff )
l = 0 ;
if ( type = = pci_bar_unknown ) {
2011-06-14 23:04:35 +04:00
res - > flags = decode_bar ( dev , l ) ;
res - > flags | = IORESOURCE_SIZEALIGN ;
if ( res - > flags & IORESOURCE_IO ) {
2014-10-30 20:54:43 +03:00
l64 = l & PCI_BASE_ADDRESS_IO_MASK ;
sz64 = sz & PCI_BASE_ADDRESS_IO_MASK ;
mask64 = PCI_BASE_ADDRESS_IO_MASK & ( u32 ) IO_SPACE_LIMIT ;
2008-07-28 21:38:59 +04:00
} else {
2014-10-30 20:54:43 +03:00
l64 = l & PCI_BASE_ADDRESS_MEM_MASK ;
sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK ;
mask64 = ( u32 ) PCI_BASE_ADDRESS_MEM_MASK ;
2008-07-28 21:38:59 +04:00
}
} else {
res - > flags | = ( l & IORESOURCE_ROM_ENABLE ) ;
2014-10-30 20:54:43 +03:00
l64 = l & PCI_ROM_ADDRESS_MASK ;
sz64 = sz & PCI_ROM_ADDRESS_MASK ;
mask64 = ( u32 ) PCI_ROM_ADDRESS_MASK ;
2008-07-28 21:38:59 +04:00
}
2011-06-14 23:04:35 +04:00
if ( res - > flags & IORESOURCE_MEM_64 ) {
2008-07-28 21:38:59 +04:00
pci_read_config_dword ( dev , pos + 4 , & l ) ;
pci_write_config_dword ( dev , pos + 4 , ~ 0 ) ;
pci_read_config_dword ( dev , pos + 4 , & sz ) ;
pci_write_config_dword ( dev , pos + 4 , l ) ;
l64 | = ( ( u64 ) l < < 32 ) ;
sz64 | = ( ( u64 ) sz < < 32 ) ;
2014-10-30 20:54:43 +03:00
mask64 | = ( ( u64 ) ~ 0 < < 32 ) ;
}
2008-07-28 21:38:59 +04:00
2014-10-30 20:54:43 +03:00
if ( ! dev - > mmio_always_on & & ( orig_cmd & PCI_COMMAND_DECODE_ENABLE ) )
pci_write_config_word ( dev , PCI_COMMAND , orig_cmd ) ;
2008-07-28 21:38:59 +04:00
2014-10-30 20:54:43 +03:00
if ( ! sz64 )
goto fail ;
2008-07-28 21:38:59 +04:00
2014-10-30 20:54:43 +03:00
sz64 = pci_size ( l64 , sz64 , mask64 ) ;
2014-10-30 20:54:50 +03:00
if ( ! sz64 ) {
dev_info ( & dev - > dev , FW_BUG " reg 0x%x: invalid BAR (can't size) \n " ,
pos ) ;
2014-10-30 20:54:43 +03:00
goto fail ;
2014-10-30 20:54:50 +03:00
}
2014-10-30 20:54:43 +03:00
if ( res - > flags & IORESOURCE_MEM_64 ) {
PCI: Add pci_bus_addr_t
David Ahern reported that d63e2e1f3df9 ("sparc/PCI: Clip bridge windows
to fit in upstream windows") fails to boot on sparc/T5-8:
pci 0000:06:00.0: reg 0x184: can't handle BAR above 4GB (bus address 0x110204000)
The problem is that sparc64 assumed that dma_addr_t only needed to hold DMA
addresses, i.e., bus addresses returned via the DMA API (dma_map_single(),
etc.), while the PCI core assumed dma_addr_t could hold *any* bus address,
including raw BAR values. On sparc64, all DMA addresses fit in 32 bits, so
dma_addr_t is a 32-bit type. However, BAR values can be 64 bits wide, so
they don't fit in a dma_addr_t. d63e2e1f3df9 added new checking that
tripped over this mismatch.
Add pci_bus_addr_t, which is wide enough to hold any PCI bus address,
including both raw BAR values and DMA addresses. This will be 64 bits
on 64-bit platforms and on platforms with a 64-bit dma_addr_t. Then
dma_addr_t only needs to be wide enough to hold addresses from the DMA API.
[bhelgaas: changelog, bugzilla, Kconfig to ensure pci_bus_addr_t is at
least as wide as dma_addr_t, documentation]
Fixes: d63e2e1f3df9 ("sparc/PCI: Clip bridge windows to fit in upstream windows")
Fixes: 23b13bc76f35 ("PCI: Fail safely if we can't handle BARs larger than 4GB")
Link: http://lkml.kernel.org/r/CAE9FiQU1gJY1LYrxs+ma5LCTEEe4xmtjRG0aXJ9K_Tsu+m9Wuw@mail.gmail.com
Link: http://lkml.kernel.org/r/1427857069-6789-1-git-send-email-yinghai@kernel.org
Link: https://bugzilla.kernel.org/show_bug.cgi?id=96231
Reported-by: David Ahern <david.ahern@oracle.com>
Tested-by: David Ahern <david.ahern@oracle.com>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Acked-by: David S. Miller <davem@davemloft.net>
CC: stable@vger.kernel.org # v3.19+
2015-05-28 03:23:51 +03:00
if ( ( sizeof ( pci_bus_addr_t ) < 8 | | sizeof ( resource_size_t ) < 8 )
& & sz64 > 0x100000000ULL ) {
2014-04-15 01:25:54 +04:00
res - > flags | = IORESOURCE_UNSET | IORESOURCE_DISABLED ;
res - > start = 0 ;
res - > end = 0 ;
2014-10-30 20:54:43 +03:00
dev_err ( & dev - > dev , " reg 0x%x: can't handle BAR larger than 4GB (size %#010llx) \n " ,
pos , ( unsigned long long ) sz64 ) ;
2014-04-15 01:25:54 +04:00
goto out ;
2009-10-27 22:26:47 +03:00
}
PCI: Add pci_bus_addr_t
David Ahern reported that d63e2e1f3df9 ("sparc/PCI: Clip bridge windows
to fit in upstream windows") fails to boot on sparc/T5-8:
pci 0000:06:00.0: reg 0x184: can't handle BAR above 4GB (bus address 0x110204000)
The problem is that sparc64 assumed that dma_addr_t only needed to hold DMA
addresses, i.e., bus addresses returned via the DMA API (dma_map_single(),
etc.), while the PCI core assumed dma_addr_t could hold *any* bus address,
including raw BAR values. On sparc64, all DMA addresses fit in 32 bits, so
dma_addr_t is a 32-bit type. However, BAR values can be 64 bits wide, so
they don't fit in a dma_addr_t. d63e2e1f3df9 added new checking that
tripped over this mismatch.
Add pci_bus_addr_t, which is wide enough to hold any PCI bus address,
including both raw BAR values and DMA addresses. This will be 64 bits
on 64-bit platforms and on platforms with a 64-bit dma_addr_t. Then
dma_addr_t only needs to be wide enough to hold addresses from the DMA API.
[bhelgaas: changelog, bugzilla, Kconfig to ensure pci_bus_addr_t is at
least as wide as dma_addr_t, documentation]
Fixes: d63e2e1f3df9 ("sparc/PCI: Clip bridge windows to fit in upstream windows")
Fixes: 23b13bc76f35 ("PCI: Fail safely if we can't handle BARs larger than 4GB")
Link: http://lkml.kernel.org/r/CAE9FiQU1gJY1LYrxs+ma5LCTEEe4xmtjRG0aXJ9K_Tsu+m9Wuw@mail.gmail.com
Link: http://lkml.kernel.org/r/1427857069-6789-1-git-send-email-yinghai@kernel.org
Link: https://bugzilla.kernel.org/show_bug.cgi?id=96231
Reported-by: David Ahern <david.ahern@oracle.com>
Tested-by: David Ahern <david.ahern@oracle.com>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Acked-by: David S. Miller <davem@davemloft.net>
CC: stable@vger.kernel.org # v3.19+
2015-05-28 03:23:51 +03:00
if ( ( sizeof ( pci_bus_addr_t ) < 8 ) & & l ) {
2014-04-30 04:37:47 +04:00
/* Above 32-bit boundary; try to reallocate */
2014-02-26 22:26:00 +04:00
res - > flags | = IORESOURCE_UNSET ;
2014-04-30 04:42:49 +04:00
res - > start = 0 ;
res - > end = sz64 ;
2014-10-30 20:54:43 +03:00
dev_info ( & dev - > dev , " reg 0x%x: can't handle BAR above 4GB (bus address %#010llx) \n " ,
pos , ( unsigned long long ) l64 ) ;
2014-04-30 04:42:49 +04:00
goto out ;
2008-07-28 21:38:59 +04:00
}
}
2014-10-30 20:54:43 +03:00
region . start = l64 ;
region . end = l64 + sz64 ;
PCI: Convert pcibios_resource_to_bus() to take a pci_bus, not a pci_dev
These interfaces:
pcibios_resource_to_bus(struct pci_dev *dev, *bus_region, *resource)
pcibios_bus_to_resource(struct pci_dev *dev, *resource, *bus_region)
took a pci_dev, but they really depend only on the pci_bus. And we want to
use them in resource allocation paths where we have the bus but not a
device, so this patch converts them to take the pci_bus instead of the
pci_dev:
pcibios_resource_to_bus(struct pci_bus *bus, *bus_region, *resource)
pcibios_bus_to_resource(struct pci_bus *bus, *resource, *bus_region)
In fact, with standard PCI-PCI bridges, they only depend on the host
bridge, because that's the only place address translation occurs, but
we aren't going that far yet.
[bhelgaas: changelog]
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
2013-12-10 10:54:40 +04:00
pcibios_bus_to_resource ( dev - > bus , res , & region ) ;
pcibios_resource_to_bus ( dev - > bus , & inverted_region , res ) ;
2013-05-25 15:36:27 +04:00
/*
* If " A " is a BAR value ( a bus address ) , " bus_to_resource(A) " is
* the corresponding resource address ( the physical address used by
* the CPU . Converting that resource address back to a bus address
* should yield the original BAR value :
*
* resource_to_bus ( bus_to_resource ( A ) ) = = A
*
* If it doesn ' t , CPU accesses to " bus_to_resource(A) " will not
* be claimed by the device .
*/
if ( inverted_region . start ! = region . start ) {
res - > flags | = IORESOURCE_UNSET ;
res - > start = 0 ;
2014-04-15 01:26:50 +04:00
res - > end = region . end - region . start ;
2014-10-30 20:54:43 +03:00
dev_info ( & dev - > dev , " reg 0x%x: initial BAR value %#010llx invalid \n " ,
pos , ( unsigned long long ) region . start ) ;
2013-05-25 15:36:27 +04:00
}
2013-05-25 15:36:26 +04:00
2012-08-23 20:53:08 +04:00
goto out ;
fail :
res - > flags = 0 ;
out :
2014-04-30 04:37:47 +04:00
if ( res - > flags )
2013-05-25 15:36:25 +04:00
dev_printk ( KERN_DEBUG , & dev - > dev , " reg 0x%x: %pR \n " , pos , res ) ;
2012-08-23 20:53:08 +04:00
2011-06-14 23:04:35 +04:00
return ( res - > flags & IORESOURCE_MEM_64 ) ? 1 : 0 ;
2006-11-30 00:53:10 +03:00
}
2005-04-17 02:20:36 +04:00
static void pci_read_bases ( struct pci_dev * dev , unsigned int howmany , int rom )
{
2008-07-28 21:38:59 +04:00
unsigned int pos , reg ;
2006-11-30 00:53:10 +03:00
2008-07-28 21:38:59 +04:00
for ( pos = 0 ; pos < howmany ; pos + + ) {
struct resource * res = & dev - > resource [ pos ] ;
2005-04-17 02:20:36 +04:00
reg = PCI_BASE_ADDRESS_0 + ( pos < < 2 ) ;
2008-07-28 21:38:59 +04:00
pos + = __pci_read_base ( dev , pci_bar_unknown , res , reg ) ;
2005-04-17 02:20:36 +04:00
}
2008-07-28 21:38:59 +04:00
2005-04-17 02:20:36 +04:00
if ( rom ) {
2008-07-28 21:38:59 +04:00
struct resource * res = & dev - > resource [ PCI_ROM_RESOURCE ] ;
2005-04-17 02:20:36 +04:00
dev - > rom_base_reg = rom ;
2008-07-28 21:38:59 +04:00
res - > flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
IORESOURCE_SIZEALIGN ;
__pci_read_base ( dev , pci_bar_mem32 , res , rom ) ;
2005-04-17 02:20:36 +04:00
}
}
2012-11-22 00:35:00 +04:00
static void pci_read_bridge_io ( struct pci_bus * child )
2005-04-17 02:20:36 +04:00
{
struct pci_dev * dev = child - > self ;
u8 io_base_lo , io_limit_lo ;
2012-07-09 23:38:57 +04:00
unsigned long io_mask , io_granularity , base , limit ;
2012-02-24 07:19:00 +04:00
struct pci_bus_region region ;
2012-07-09 23:38:57 +04:00
struct resource * res ;
io_mask = PCI_IO_RANGE_MASK ;
io_granularity = 0x1000 ;
if ( dev - > io_window_1k ) {
/* Support 1K I/O space granularity */
io_mask = PCI_IO_1K_RANGE_MASK ;
io_granularity = 0x400 ;
}
2005-04-17 02:20:36 +04:00
res = child - > resource [ 0 ] ;
pci_read_config_byte ( dev , PCI_IO_BASE , & io_base_lo ) ;
pci_read_config_byte ( dev , PCI_IO_LIMIT , & io_limit_lo ) ;
2012-07-09 23:38:57 +04:00
base = ( io_base_lo & io_mask ) < < 8 ;
limit = ( io_limit_lo & io_mask ) < < 8 ;
2005-04-17 02:20:36 +04:00
if ( ( io_base_lo & PCI_IO_RANGE_TYPE_MASK ) = = PCI_IO_RANGE_TYPE_32 ) {
u16 io_base_hi , io_limit_hi ;
2012-06-19 17:45:44 +04:00
2005-04-17 02:20:36 +04:00
pci_read_config_word ( dev , PCI_IO_BASE_UPPER16 , & io_base_hi ) ;
pci_read_config_word ( dev , PCI_IO_LIMIT_UPPER16 , & io_limit_hi ) ;
2012-06-19 17:45:44 +04:00
base | = ( ( unsigned long ) io_base_hi < < 16 ) ;
limit | = ( ( unsigned long ) io_limit_hi < < 16 ) ;
2005-04-17 02:20:36 +04:00
}
2012-07-09 23:38:41 +04:00
if ( base < = limit ) {
2005-04-17 02:20:36 +04:00
res - > flags = ( io_base_lo & PCI_IO_RANGE_TYPE_MASK ) | IORESOURCE_IO ;
2012-02-24 07:19:00 +04:00
region . start = base ;
2012-07-09 23:38:57 +04:00
region . end = limit + io_granularity - 1 ;
PCI: Convert pcibios_resource_to_bus() to take a pci_bus, not a pci_dev
These interfaces:
pcibios_resource_to_bus(struct pci_dev *dev, *bus_region, *resource)
pcibios_bus_to_resource(struct pci_dev *dev, *resource, *bus_region)
took a pci_dev, but they really depend only on the pci_bus. And we want to
use them in resource allocation paths where we have the bus but not a
device, so this patch converts them to take the pci_bus instead of the
pci_dev:
pcibios_resource_to_bus(struct pci_bus *bus, *bus_region, *resource)
pcibios_bus_to_resource(struct pci_bus *bus, *resource, *bus_region)
In fact, with standard PCI-PCI bridges, they only depend on the host
bridge, because that's the only place address translation occurs, but
we aren't going that far yet.
[bhelgaas: changelog]
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
2013-12-10 10:54:40 +04:00
pcibios_bus_to_resource ( dev - > bus , res , & region ) ;
2009-10-27 22:26:47 +03:00
dev_printk ( KERN_DEBUG , & dev - > dev , " bridge window %pR \n " , res ) ;
2005-04-17 02:20:36 +04:00
}
2010-02-23 20:24:21 +03:00
}
2012-11-22 00:35:00 +04:00
static void pci_read_bridge_mmio ( struct pci_bus * child )
2010-02-23 20:24:21 +03:00
{
struct pci_dev * dev = child - > self ;
u16 mem_base_lo , mem_limit_lo ;
unsigned long base , limit ;
2012-02-24 07:19:00 +04:00
struct pci_bus_region region ;
2010-02-23 20:24:21 +03:00
struct resource * res ;
2005-04-17 02:20:36 +04:00
res = child - > resource [ 1 ] ;
pci_read_config_word ( dev , PCI_MEMORY_BASE , & mem_base_lo ) ;
pci_read_config_word ( dev , PCI_MEMORY_LIMIT , & mem_limit_lo ) ;
2012-06-19 17:45:44 +04:00
base = ( ( unsigned long ) mem_base_lo & PCI_MEMORY_RANGE_MASK ) < < 16 ;
limit = ( ( unsigned long ) mem_limit_lo & PCI_MEMORY_RANGE_MASK ) < < 16 ;
2012-07-09 23:38:41 +04:00
if ( base < = limit ) {
2005-04-17 02:20:36 +04:00
res - > flags = ( mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK ) | IORESOURCE_MEM ;
2012-02-24 07:19:00 +04:00
region . start = base ;
region . end = limit + 0xfffff ;
PCI: Convert pcibios_resource_to_bus() to take a pci_bus, not a pci_dev
These interfaces:
pcibios_resource_to_bus(struct pci_dev *dev, *bus_region, *resource)
pcibios_bus_to_resource(struct pci_dev *dev, *resource, *bus_region)
took a pci_dev, but they really depend only on the pci_bus. And we want to
use them in resource allocation paths where we have the bus but not a
device, so this patch converts them to take the pci_bus instead of the
pci_dev:
pcibios_resource_to_bus(struct pci_bus *bus, *bus_region, *resource)
pcibios_bus_to_resource(struct pci_bus *bus, *resource, *bus_region)
In fact, with standard PCI-PCI bridges, they only depend on the host
bridge, because that's the only place address translation occurs, but
we aren't going that far yet.
[bhelgaas: changelog]
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
2013-12-10 10:54:40 +04:00
pcibios_bus_to_resource ( dev - > bus , res , & region ) ;
2009-10-27 22:26:47 +03:00
dev_printk ( KERN_DEBUG , & dev - > dev , " bridge window %pR \n " , res ) ;
2005-04-17 02:20:36 +04:00
}
2010-02-23 20:24:21 +03:00
}
2012-11-22 00:35:00 +04:00
static void pci_read_bridge_mmio_pref ( struct pci_bus * child )
2010-02-23 20:24:21 +03:00
{
struct pci_dev * dev = child - > self ;
u16 mem_base_lo , mem_limit_lo ;
2014-11-20 00:30:32 +03:00
u64 base64 , limit64 ;
PCI: Add pci_bus_addr_t
David Ahern reported that d63e2e1f3df9 ("sparc/PCI: Clip bridge windows
to fit in upstream windows") fails to boot on sparc/T5-8:
pci 0000:06:00.0: reg 0x184: can't handle BAR above 4GB (bus address 0x110204000)
The problem is that sparc64 assumed that dma_addr_t only needed to hold DMA
addresses, i.e., bus addresses returned via the DMA API (dma_map_single(),
etc.), while the PCI core assumed dma_addr_t could hold *any* bus address,
including raw BAR values. On sparc64, all DMA addresses fit in 32 bits, so
dma_addr_t is a 32-bit type. However, BAR values can be 64 bits wide, so
they don't fit in a dma_addr_t. d63e2e1f3df9 added new checking that
tripped over this mismatch.
Add pci_bus_addr_t, which is wide enough to hold any PCI bus address,
including both raw BAR values and DMA addresses. This will be 64 bits
on 64-bit platforms and on platforms with a 64-bit dma_addr_t. Then
dma_addr_t only needs to be wide enough to hold addresses from the DMA API.
[bhelgaas: changelog, bugzilla, Kconfig to ensure pci_bus_addr_t is at
least as wide as dma_addr_t, documentation]
Fixes: d63e2e1f3df9 ("sparc/PCI: Clip bridge windows to fit in upstream windows")
Fixes: 23b13bc76f35 ("PCI: Fail safely if we can't handle BARs larger than 4GB")
Link: http://lkml.kernel.org/r/CAE9FiQU1gJY1LYrxs+ma5LCTEEe4xmtjRG0aXJ9K_Tsu+m9Wuw@mail.gmail.com
Link: http://lkml.kernel.org/r/1427857069-6789-1-git-send-email-yinghai@kernel.org
Link: https://bugzilla.kernel.org/show_bug.cgi?id=96231
Reported-by: David Ahern <david.ahern@oracle.com>
Tested-by: David Ahern <david.ahern@oracle.com>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Acked-by: David S. Miller <davem@davemloft.net>
CC: stable@vger.kernel.org # v3.19+
2015-05-28 03:23:51 +03:00
pci_bus_addr_t base , limit ;
2012-02-24 07:19:00 +04:00
struct pci_bus_region region ;
2010-02-23 20:24:21 +03:00
struct resource * res ;
2005-04-17 02:20:36 +04:00
res = child - > resource [ 2 ] ;
pci_read_config_word ( dev , PCI_PREF_MEMORY_BASE , & mem_base_lo ) ;
pci_read_config_word ( dev , PCI_PREF_MEMORY_LIMIT , & mem_limit_lo ) ;
2014-11-20 00:30:32 +03:00
base64 = ( mem_base_lo & PCI_PREF_RANGE_MASK ) < < 16 ;
limit64 = ( mem_limit_lo & PCI_PREF_RANGE_MASK ) < < 16 ;
2005-04-17 02:20:36 +04:00
if ( ( mem_base_lo & PCI_PREF_RANGE_TYPE_MASK ) = = PCI_PREF_RANGE_TYPE_64 ) {
u32 mem_base_hi , mem_limit_hi ;
2012-06-19 17:45:44 +04:00
2005-04-17 02:20:36 +04:00
pci_read_config_dword ( dev , PCI_PREF_BASE_UPPER32 , & mem_base_hi ) ;
pci_read_config_dword ( dev , PCI_PREF_LIMIT_UPPER32 , & mem_limit_hi ) ;
/*
* Some bridges set the base > limit by default , and some
* ( broken ) BIOSes do not initialize them . If we find
* this , just assume they are not being used .
*/
if ( mem_base_hi < = mem_limit_hi ) {
2014-11-20 00:30:32 +03:00
base64 | = ( u64 ) mem_base_hi < < 32 ;
limit64 | = ( u64 ) mem_limit_hi < < 32 ;
2005-04-17 02:20:36 +04:00
}
}
2014-11-20 00:30:32 +03:00
PCI: Add pci_bus_addr_t
David Ahern reported that d63e2e1f3df9 ("sparc/PCI: Clip bridge windows
to fit in upstream windows") fails to boot on sparc/T5-8:
pci 0000:06:00.0: reg 0x184: can't handle BAR above 4GB (bus address 0x110204000)
The problem is that sparc64 assumed that dma_addr_t only needed to hold DMA
addresses, i.e., bus addresses returned via the DMA API (dma_map_single(),
etc.), while the PCI core assumed dma_addr_t could hold *any* bus address,
including raw BAR values. On sparc64, all DMA addresses fit in 32 bits, so
dma_addr_t is a 32-bit type. However, BAR values can be 64 bits wide, so
they don't fit in a dma_addr_t. d63e2e1f3df9 added new checking that
tripped over this mismatch.
Add pci_bus_addr_t, which is wide enough to hold any PCI bus address,
including both raw BAR values and DMA addresses. This will be 64 bits
on 64-bit platforms and on platforms with a 64-bit dma_addr_t. Then
dma_addr_t only needs to be wide enough to hold addresses from the DMA API.
[bhelgaas: changelog, bugzilla, Kconfig to ensure pci_bus_addr_t is at
least as wide as dma_addr_t, documentation]
Fixes: d63e2e1f3df9 ("sparc/PCI: Clip bridge windows to fit in upstream windows")
Fixes: 23b13bc76f35 ("PCI: Fail safely if we can't handle BARs larger than 4GB")
Link: http://lkml.kernel.org/r/CAE9FiQU1gJY1LYrxs+ma5LCTEEe4xmtjRG0aXJ9K_Tsu+m9Wuw@mail.gmail.com
Link: http://lkml.kernel.org/r/1427857069-6789-1-git-send-email-yinghai@kernel.org
Link: https://bugzilla.kernel.org/show_bug.cgi?id=96231
Reported-by: David Ahern <david.ahern@oracle.com>
Tested-by: David Ahern <david.ahern@oracle.com>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Acked-by: David S. Miller <davem@davemloft.net>
CC: stable@vger.kernel.org # v3.19+
2015-05-28 03:23:51 +03:00
base = ( pci_bus_addr_t ) base64 ;
limit = ( pci_bus_addr_t ) limit64 ;
2014-11-20 00:30:32 +03:00
if ( base ! = base64 ) {
dev_err ( & dev - > dev , " can't handle bridge window above 4GB (bus address %#010llx) \n " ,
( unsigned long long ) base64 ) ;
return ;
}
2012-07-09 23:38:41 +04:00
if ( base < = limit ) {
2009-04-24 07:48:32 +04:00
res - > flags = ( mem_base_lo & PCI_PREF_RANGE_TYPE_MASK ) |
IORESOURCE_MEM | IORESOURCE_PREFETCH ;
if ( res - > flags & PCI_PREF_RANGE_TYPE_64 )
res - > flags | = IORESOURCE_MEM_64 ;
2012-02-24 07:19:00 +04:00
region . start = base ;
region . end = limit + 0xfffff ;
PCI: Convert pcibios_resource_to_bus() to take a pci_bus, not a pci_dev
These interfaces:
pcibios_resource_to_bus(struct pci_dev *dev, *bus_region, *resource)
pcibios_bus_to_resource(struct pci_dev *dev, *resource, *bus_region)
took a pci_dev, but they really depend only on the pci_bus. And we want to
use them in resource allocation paths where we have the bus but not a
device, so this patch converts them to take the pci_bus instead of the
pci_dev:
pcibios_resource_to_bus(struct pci_bus *bus, *bus_region, *resource)
pcibios_bus_to_resource(struct pci_bus *bus, *resource, *bus_region)
In fact, with standard PCI-PCI bridges, they only depend on the host
bridge, because that's the only place address translation occurs, but
we aren't going that far yet.
[bhelgaas: changelog]
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
2013-12-10 10:54:40 +04:00
pcibios_bus_to_resource ( dev - > bus , res , & region ) ;
2009-10-27 22:26:47 +03:00
dev_printk ( KERN_DEBUG , & dev - > dev , " bridge window %pR \n " , res ) ;
2005-04-17 02:20:36 +04:00
}
}
2012-11-22 00:35:00 +04:00
void pci_read_bridge_bases ( struct pci_bus * child )
2010-02-23 20:24:21 +03:00
{
struct pci_dev * dev = child - > self ;
2010-02-23 20:24:36 +03:00
struct resource * res ;
2010-02-23 20:24:21 +03:00
int i ;
if ( pci_is_root_bus ( child ) ) /* It's a host bus, nothing to read */
return ;
2012-05-18 05:51:11 +04:00
dev_info ( & dev - > dev , " PCI bridge to %pR%s \n " ,
& child - > busn_res ,
2010-02-23 20:24:21 +03:00
dev - > transparent ? " (subtractive decode) " : " " ) ;
2010-02-23 20:24:36 +03:00
pci_bus_remove_resources ( child ) ;
for ( i = 0 ; i < PCI_BRIDGE_RESOURCE_NUM ; i + + )
child - > resource [ i ] = & dev - > resource [ PCI_BRIDGE_RESOURCES + i ] ;
2010-02-23 20:24:21 +03:00
pci_read_bridge_io ( child ) ;
pci_read_bridge_mmio ( child ) ;
pci_read_bridge_mmio_pref ( child ) ;
2010-02-23 20:24:26 +03:00
if ( dev - > transparent ) {
2010-02-23 20:24:36 +03:00
pci_bus_for_each_resource ( child - > parent , res , i ) {
2014-04-15 02:10:54 +04:00
if ( res & & res - > flags ) {
2010-02-23 20:24:36 +03:00
pci_bus_add_resource ( child , res ,
PCI_SUBTRACTIVE_DECODE ) ;
2010-02-23 20:24:26 +03:00
dev_printk ( KERN_DEBUG , & dev - > dev ,
" bridge window %pR (subtractive decode) \n " ,
2010-02-23 20:24:36 +03:00
res ) ;
}
2010-02-23 20:24:26 +03:00
}
}
2010-02-23 20:24:21 +03:00
}
2014-09-29 18:29:26 +04:00
static struct pci_bus * pci_alloc_bus ( struct pci_bus * parent )
2005-04-17 02:20:36 +04:00
{
struct pci_bus * b ;
2006-02-28 17:34:49 +03:00
b = kzalloc ( sizeof ( * b ) , GFP_KERNEL ) ;
2013-06-06 00:22:11 +04:00
if ( ! b )
return NULL ;
INIT_LIST_HEAD ( & b - > node ) ;
INIT_LIST_HEAD ( & b - > children ) ;
INIT_LIST_HEAD ( & b - > devices ) ;
INIT_LIST_HEAD ( & b - > slots ) ;
INIT_LIST_HEAD ( & b - > resources ) ;
b - > max_bus_speed = PCI_SPEED_UNKNOWN ;
b - > cur_bus_speed = PCI_SPEED_UNKNOWN ;
2014-09-29 18:29:26 +04:00
# ifdef CONFIG_PCI_DOMAINS_GENERIC
if ( parent )
b - > domain_nr = parent - > domain_nr ;
# endif
2005-04-17 02:20:36 +04:00
return b ;
}
2013-06-08 02:16:51 +04:00
static void pci_release_host_bridge_dev ( struct device * dev )
{
struct pci_host_bridge * bridge = to_pci_host_bridge ( dev ) ;
if ( bridge - > release_fn )
bridge - > release_fn ( bridge ) ;
pci_free_resource_list ( & bridge - > windows ) ;
kfree ( bridge ) ;
}
2012-04-03 05:31:53 +04:00
static struct pci_host_bridge * pci_alloc_host_bridge ( struct pci_bus * b )
{
struct pci_host_bridge * bridge ;
bridge = kzalloc ( sizeof ( * bridge ) , GFP_KERNEL ) ;
2013-06-06 00:22:11 +04:00
if ( ! bridge )
return NULL ;
2012-04-03 05:31:53 +04:00
2013-06-06 00:22:11 +04:00
INIT_LIST_HEAD ( & bridge - > windows ) ;
bridge - > bus = b ;
2012-04-03 05:31:53 +04:00
return bridge ;
}
2014-01-11 04:14:48 +04:00
static const unsigned char pcix_bus_speed [ ] = {
2009-12-13 16:11:33 +03:00
PCI_SPEED_UNKNOWN , /* 0 */
PCI_SPEED_66MHz_PCIX , /* 1 */
PCI_SPEED_100MHz_PCIX , /* 2 */
PCI_SPEED_133MHz_PCIX , /* 3 */
PCI_SPEED_UNKNOWN , /* 4 */
PCI_SPEED_66MHz_PCIX_ECC , /* 5 */
PCI_SPEED_100MHz_PCIX_ECC , /* 6 */
PCI_SPEED_133MHz_PCIX_ECC , /* 7 */
PCI_SPEED_UNKNOWN , /* 8 */
PCI_SPEED_66MHz_PCIX_266 , /* 9 */
PCI_SPEED_100MHz_PCIX_266 , /* A */
PCI_SPEED_133MHz_PCIX_266 , /* B */
PCI_SPEED_UNKNOWN , /* C */
PCI_SPEED_66MHz_PCIX_533 , /* D */
PCI_SPEED_100MHz_PCIX_533 , /* E */
PCI_SPEED_133MHz_PCIX_533 /* F */
} ;
2013-07-31 10:53:16 +04:00
const unsigned char pcie_link_speed [ ] = {
2009-12-13 16:11:32 +03:00
PCI_SPEED_UNKNOWN , /* 0 */
PCIE_SPEED_2_5GT , /* 1 */
PCIE_SPEED_5_0GT , /* 2 */
2009-12-13 16:11:35 +03:00
PCIE_SPEED_8_0GT , /* 3 */
2009-12-13 16:11:32 +03:00
PCI_SPEED_UNKNOWN , /* 4 */
PCI_SPEED_UNKNOWN , /* 5 */
PCI_SPEED_UNKNOWN , /* 6 */
PCI_SPEED_UNKNOWN , /* 7 */
PCI_SPEED_UNKNOWN , /* 8 */
PCI_SPEED_UNKNOWN , /* 9 */
PCI_SPEED_UNKNOWN , /* A */
PCI_SPEED_UNKNOWN , /* B */
PCI_SPEED_UNKNOWN , /* C */
PCI_SPEED_UNKNOWN , /* D */
PCI_SPEED_UNKNOWN , /* E */
PCI_SPEED_UNKNOWN /* F */
} ;
/* Record the current link speed of @bus from a Link Status register value. */
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
2009-12-13 16:11:34 +03:00
/*
 * Index-to-speed table used by agp_speed(); index 0 is "unknown".
 * Read-only lookup data, so declare it const (matches pcix_bus_speed).
 */
static const unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};
static enum pci_bus_speed agp_speed ( int agp3 , int agpstat )
{
int index = 0 ;
if ( agpstat & 4 )
index = 3 ;
else if ( agpstat & 2 )
index = 2 ;
else if ( agpstat & 1 )
index = 1 ;
else
goto out ;
2013-11-14 22:28:18 +04:00
2009-12-13 16:11:34 +03:00
if ( agp3 ) {
index + = 2 ;
if ( index = = 5 )
index = 0 ;
}
out :
return agp_speeds [ index ] ;
}
2009-12-13 16:11:33 +03:00
static void pci_set_bus_speed ( struct pci_bus * bus )
{
struct pci_dev * bridge = bus - > self ;
int pos ;
2009-12-13 16:11:34 +03:00
pos = pci_find_capability ( bridge , PCI_CAP_ID_AGP ) ;
if ( ! pos )
pos = pci_find_capability ( bridge , PCI_CAP_ID_AGP3 ) ;
if ( pos ) {
u32 agpstat , agpcmd ;
pci_read_config_dword ( bridge , pos + PCI_AGP_STATUS , & agpstat ) ;
bus - > max_bus_speed = agp_speed ( agpstat & 8 , agpstat & 7 ) ;
pci_read_config_dword ( bridge , pos + PCI_AGP_COMMAND , & agpcmd ) ;
bus - > cur_bus_speed = agp_speed ( agpstat & 8 , agpcmd & 7 ) ;
}
2009-12-13 16:11:33 +03:00
pos = pci_find_capability ( bridge , PCI_CAP_ID_PCIX ) ;
if ( pos ) {
u16 status ;
enum pci_bus_speed max ;
2012-12-06 00:51:17 +04:00
pci_read_config_word ( bridge , pos + PCI_X_BRIDGE_SSTATUS ,
& status ) ;
if ( status & PCI_X_SSTATUS_533MHZ ) {
2009-12-13 16:11:33 +03:00
max = PCI_SPEED_133MHz_PCIX_533 ;
2012-12-06 00:51:17 +04:00
} else if ( status & PCI_X_SSTATUS_266MHZ ) {
2009-12-13 16:11:33 +03:00
max = PCI_SPEED_133MHz_PCIX_266 ;
2012-12-06 00:51:17 +04:00
} else if ( status & PCI_X_SSTATUS_133MHZ ) {
2014-04-19 04:13:49 +04:00
if ( ( status & PCI_X_SSTATUS_VERS ) = = PCI_X_SSTATUS_V2 )
2009-12-13 16:11:33 +03:00
max = PCI_SPEED_133MHz_PCIX_ECC ;
2014-04-19 04:13:49 +04:00
else
2009-12-13 16:11:33 +03:00
max = PCI_SPEED_133MHz_PCIX ;
} else {
max = PCI_SPEED_66MHz_PCIX ;
}
bus - > max_bus_speed = max ;
2012-12-06 00:51:17 +04:00
bus - > cur_bus_speed = pcix_bus_speed [
( status & PCI_X_SSTATUS_FREQ ) > > 6 ] ;
2009-12-13 16:11:33 +03:00
return ;
}
2013-09-05 11:55:29 +04:00
if ( pci_is_pcie ( bridge ) ) {
2009-12-13 16:11:33 +03:00
u32 linkcap ;
u16 linksta ;
2012-07-24 13:20:06 +04:00
pcie_capability_read_dword ( bridge , PCI_EXP_LNKCAP , & linkcap ) ;
2012-12-06 00:51:18 +04:00
bus - > max_bus_speed = pcie_link_speed [ linkcap & PCI_EXP_LNKCAP_SLS ] ;
2009-12-13 16:11:33 +03:00
2012-07-24 13:20:06 +04:00
pcie_capability_read_word ( bridge , PCI_EXP_LNKSTA , & linksta ) ;
2009-12-13 16:11:33 +03:00
pcie_update_link_speed ( bus , linksta ) ;
}
}
2008-04-19 00:53:55 +04:00
static struct pci_bus * pci_alloc_child_bus ( struct pci_bus * parent ,
struct pci_dev * bridge , int busnr )
2005-04-17 02:20:36 +04:00
{
struct pci_bus * child ;
int i ;
2013-01-22 01:20:52 +04:00
int ret ;
2005-04-17 02:20:36 +04:00
/*
* Allocate a new bus , and inherit stuff from the parent . .
*/
2014-09-29 18:29:26 +04:00
child = pci_alloc_bus ( parent ) ;
2005-04-17 02:20:36 +04:00
if ( ! child )
return NULL ;
child - > parent = parent ;
child - > ops = parent - > ops ;
2013-08-10 00:27:08 +04:00
child - > msi = parent - > msi ;
2005-04-17 02:20:36 +04:00
child - > sysdata = parent - > sysdata ;
2006-02-14 19:52:22 +03:00
child - > bus_flags = parent - > bus_flags ;
2005-04-17 02:20:36 +04:00
2007-05-23 06:47:54 +04:00
/* initialize some portions of the bus device, but don't register it
2013-01-22 01:20:52 +04:00
* now as the parent is not properly set up yet .
2007-05-23 06:47:54 +04:00
*/
child - > dev . class = & pcibus_class ;
2008-10-30 04:17:49 +03:00
dev_set_name ( & child - > dev , " %04x:%02x " , pci_domain_nr ( child ) , busnr ) ;
2005-04-17 02:20:36 +04:00
/*
* Set up the primary , secondary and subordinate
* bus numbers .
*/
2012-05-18 05:51:11 +04:00
child - > number = child - > busn_res . start = busnr ;
child - > primary = parent - > busn_res . start ;
child - > busn_res . end = 0xff ;
2005-04-17 02:20:36 +04:00
2013-01-22 01:20:52 +04:00
if ( ! bridge ) {
child - > dev . parent = parent - > bridge ;
goto add_dev ;
}
2008-11-21 21:41:07 +03:00
child - > self = bridge ;
child - > bridge = get_device ( & bridge - > dev ) ;
2013-01-22 01:20:52 +04:00
child - > dev . parent = child - > bridge ;
2011-04-11 05:37:07 +04:00
pci_set_bus_of_node ( child ) ;
2009-12-13 16:11:33 +03:00
pci_set_bus_speed ( child ) ;
2005-04-17 02:20:36 +04:00
/* Set up default resource pointers and names.. */
2008-11-21 21:39:32 +03:00
for ( i = 0 ; i < PCI_BRIDGE_RESOURCE_NUM ; i + + ) {
2005-04-17 02:20:36 +04:00
child - > resource [ i ] = & bridge - > resource [ PCI_BRIDGE_RESOURCES + i ] ;
child - > resource [ i ] - > name = child - > name ;
}
bridge - > subordinate = child ;
2013-01-22 01:20:52 +04:00
add_dev :
ret = device_register ( & child - > dev ) ;
WARN_ON ( ret < 0 ) ;
2013-04-12 09:44:20 +04:00
pcibios_add_bus ( child ) ;
2013-01-22 01:20:52 +04:00
/* Create legacy_io and legacy_mem files for this bus */
pci_create_legacy_files ( child ) ;
2005-04-17 02:20:36 +04:00
return child ;
}
2014-04-19 04:13:49 +04:00
struct pci_bus * pci_add_new_bus ( struct pci_bus * parent , struct pci_dev * dev ,
int busnr )
2005-04-17 02:20:36 +04:00
{
struct pci_bus * child ;
child = pci_alloc_child_bus ( parent , dev , busnr ) ;
2005-04-28 11:25:48 +04:00
if ( child ) {
2006-06-02 08:35:43 +04:00
down_write ( & pci_bus_sem ) ;
2005-04-17 02:20:36 +04:00
list_add_tail ( & child - > node , & parent - > children ) ;
2006-06-02 08:35:43 +04:00
up_write ( & pci_bus_sem ) ;
2005-04-28 11:25:48 +04:00
}
2005-04-17 02:20:36 +04:00
return child ;
}
2014-04-26 00:32:25 +04:00
EXPORT_SYMBOL ( pci_add_new_bus ) ;
2005-04-17 02:20:36 +04:00
2014-09-03 03:26:00 +04:00
/*
 * Enable CRS Software Visibility on a root port if the Root Capabilities
 * register advertises support for it.
 */
static void pci_enable_crs(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_CRSSVE);
}
2005-04-17 02:20:36 +04:00
/*
* If it ' s a bridge , configure it and scan the bus behind it .
* For CardBus bridges , we don ' t scan behind as the devices will
* be handled by the bridge driver itself .
*
* We need to process bridges in two passes - - first we scan those
* already configured by the BIOS and after we are done with all of
* them , we proceed to assigning numbers to the remaining buses in
* order to avoid overlaps between old and new bus numbers .
*/
2012-11-22 00:35:00 +04:00
int pci_scan_bridge ( struct pci_bus * bus , struct pci_dev * dev , int max , int pass )
2005-04-17 02:20:36 +04:00
{
struct pci_bus * child ;
int is_cardbus = ( dev - > hdr_type = = PCI_HEADER_TYPE_CARDBUS ) ;
2005-12-08 18:53:12 +03:00
u32 buses , i , j = 0 ;
2005-04-17 02:20:36 +04:00
u16 bctl ;
2010-03-17 00:52:58 +03:00
u8 primary , secondary , subordinate ;
2008-10-21 03:06:29 +04:00
int broken = 0 ;
2005-04-17 02:20:36 +04:00
pci_read_config_dword ( dev , PCI_PRIMARY_BUS , & buses ) ;
2010-03-17 00:52:58 +03:00
primary = buses & 0xFF ;
secondary = ( buses > > 8 ) & 0xFF ;
subordinate = ( buses > > 16 ) & 0xFF ;
2005-04-17 02:20:36 +04:00
2010-03-17 00:52:58 +03:00
dev_dbg ( & dev - > dev , " scanning [bus %02x-%02x] behind bridge, pass %d \n " ,
secondary , subordinate , pass ) ;
2005-04-17 02:20:36 +04:00
2012-01-30 15:25:24 +04:00
if ( ! primary & & ( primary ! = bus - > number ) & & secondary & & subordinate ) {
dev_warn ( & dev - > dev , " Primary bus is hard wired to 0 \n " ) ;
primary = bus - > number ;
}
2008-10-21 03:06:29 +04:00
/* Check if setup is sensible at all */
if ( ! pass & &
2012-09-11 04:19:33 +04:00
( primary ! = bus - > number | | secondary < = bus - > number | |
Revert "PCI: Make sure bus number resources stay within their parents bounds"
This reverts commit 1820ffdccb9b ("PCI: Make sure bus number resources stay
within their parents bounds") because it breaks some systems with LSI Logic
FC949ES Fibre Channel Adapters, apparently by exposing a defect in those
adapters.
Dirk tested a Tyan VX50 (B4985) with this device that worked like this
prior to 1820ffdccb9b:
bus: [bus 00-7f] on node 0 link 1
ACPI: PCI Root Bridge [PCI0] (domain 0000 [bus 00-07])
pci 0000:00:0e.0: PCI bridge to [bus 0a]
pci_bus 0000:0a: busn_res: can not insert [bus 0a] under [bus 00-07] (conflicts with (null) [bus 00-07])
pci 0000:0a:00.0: [1000:0646] type 00 class 0x0c0400 (FC adapter)
Note that the root bridge [bus 00-07] aperture is wrong; this is a BIOS
defect in the PCI0 _CRS method. But prior to 1820ffdccb9b, we didn't
enforce that aperture, and the FC adapter worked fine at 0a:00.0.
After 1820ffdccb9b, we notice that 00:0e.0's aperture is not contained in
the root bridge's aperture, so we reconfigure it so it *is* contained:
pci 0000:00:0e.0: bridge configuration invalid ([bus 0a-0a]), reconfiguring
pci 0000:00:0e.0: PCI bridge to [bus 06-07]
This effectively moves the FC device from 0a:00.0 to 07:00.0, which should
be legal. But when we enumerate bus 06, the FC device doesn't respond, so
we don't find anything. This is probably a defect in the FC device.
Possible fixes (due to Yinghai):
1) Add a quirk to fix the _CRS information based on what amd_bus.c read
from the hardware
2) Reset the FC device after we change its bus number
3) Revert 1820ffdccb9b
Fix 1 would be relatively easy, but it does sweep the LSI FC issue under
the rug. We might want to reconfigure bus numbers in the future for some
other reason, e.g., hotplug, and then we could trip over this again.
For that reason, I like fix 2, but we don't know whether it actually works,
and we don't have a patch for it yet.
This revert is fix 3, which also sweeps the LSI FC issue under the rug.
Link: https://bugzilla.kernel.org/show_bug.cgi?id=84281
Reported-by: Dirk Gouders <dirk@gouders.net>
Tested-by: Dirk Gouders <dirk@gouders.net>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
CC: stable@vger.kernel.org # v3.15+
CC: Yinghai Lu <yinghai@kernel.org>
2014-09-19 21:08:40 +04:00
secondary > subordinate ) ) {
2012-09-11 04:19:33 +04:00
dev_info ( & dev - > dev , " bridge configuration invalid ([bus %02x-%02x]), reconfiguring \n " ,
secondary , subordinate ) ;
2008-10-21 03:06:29 +04:00
broken = 1 ;
}
2005-04-17 02:20:36 +04:00
/* Disable MasterAbortMode during probing to avoid reporting
2013-11-14 22:28:18 +04:00
of bus errors ( in some architectures ) */
2005-04-17 02:20:36 +04:00
pci_read_config_word ( dev , PCI_BRIDGE_CONTROL , & bctl ) ;
pci_write_config_word ( dev , PCI_BRIDGE_CONTROL ,
bctl & ~ PCI_BRIDGE_CTL_MASTER_ABORT ) ;
2014-09-03 03:26:00 +04:00
pci_enable_crs ( dev ) ;
2010-03-17 00:52:58 +03:00
if ( ( secondary | | subordinate ) & & ! pcibios_assign_all_busses ( ) & &
! is_cardbus & & ! broken ) {
unsigned int cmax ;
2005-04-17 02:20:36 +04:00
/*
* Bus already configured by firmware , process it in the first
* pass and just note the configuration .
*/
if ( pass )
[PATCH] PCI: Avoid leaving MASTER_ABORT disabled permanently when returning from pci_scan_bridge.
> On Mon, Feb 13, 2006 at 05:13:21PM -0800, David S. Miller wrote:
> >
> > In drivers/pci/probe.c:pci_scan_bridge(), if this is not the first
> > pass (pass != 0) we don't restore the PCI_BRIDGE_CONTROL_REGISTER and
> > thus leave PCI_BRIDGE_CTL_MASTER_ABORT off:
> >
> > int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass)
> > {
> > ...
> > /* Disable MasterAbortMode during probing to avoid reporting
> > of bus errors (in some architectures) */
> > pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
> > pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
> > bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
> > ...
> > if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus) {
> > unsigned int cmax, busnr;
> > /*
> > * Bus already configured by firmware, process it in the first
> > * pass and just note the configuration.
> > */
> > if (pass)
> > return max;
> > ...
> > }
> >
> > pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
> > ...
> >
> > This doesn't seem intentional.
Agreed, looks like an accident. The patch [1] originally came from Kip
Walker (Broadcom back then) between 2.6.0-test3 and 2.6.0-test4. As I
recall it was supposed to fix an issue with with PCI aborts being
signalled by the PCI bridge of the Broadcom BCM1250 family of SOCs when
probing behind pci_scan_bridge. It is undeseriable to disable
PCI_BRIDGE_CTL_MASTER_ABORT in pci_{read,write)_config_* and the
behaviour wasn't considered a bug in need of a workaround, so this was
put in probe.c.
I don't have an affected system at hand, so can't really test but I
propose something like the below patch.
[1] http://www.linux-mips.org/git?p=linux.git;a=commit;h=599457e0cb702a31a3247ea6a5d9c6c99c4cf195
[PCI] Avoid leaving MASTER_ABORT disabled permanently when returning from pci_scan_bridge.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2006-02-14 19:23:57 +03:00
goto out ;
2005-04-17 02:20:36 +04:00
/*
2014-01-24 00:59:22 +04:00
* The bus might already exist for two reasons : Either we are
* rescanning the bus or the bus is reachable through more than
* one bridge . The second case can happen with the i450NX
* chipset .
2005-04-17 02:20:36 +04:00
*/
2010-03-17 00:52:58 +03:00
child = pci_find_bus ( pci_domain_nr ( bus ) , secondary ) ;
2009-03-20 23:56:10 +03:00
if ( ! child ) {
2010-03-17 00:52:58 +03:00
child = pci_add_new_bus ( bus , dev , secondary ) ;
2009-03-20 23:56:10 +03:00
if ( ! child )
goto out ;
2010-03-17 00:52:58 +03:00
child - > primary = primary ;
2012-05-18 05:51:13 +04:00
pci_bus_insert_busn_res ( child , secondary , subordinate ) ;
2009-03-20 23:56:10 +03:00
child - > bridge_ctl = bctl ;
2005-04-17 02:20:36 +04:00
}
cmax = pci_scan_child_bus ( child ) ;
2014-01-24 00:59:27 +04:00
if ( cmax > subordinate )
dev_warn ( & dev - > dev , " bridge has subordinate %02x but max busn %02x \n " ,
subordinate , cmax ) ;
/* subordinate should equal child->busn_res.end */
if ( subordinate > max )
max = subordinate ;
2005-04-17 02:20:36 +04:00
} else {
/*
* We need to assign a number to this bus which we always
* do in the second pass .
*/
2005-09-23 08:06:31 +04:00
if ( ! pass ) {
2014-01-24 00:59:23 +04:00
if ( pcibios_assign_all_busses ( ) | | broken | | is_cardbus )
2005-09-23 08:06:31 +04:00
/* Temporarily disable forwarding of the
configuration cycles on all bridges in
this bus segment to avoid possible
conflicts in the second pass between two
bridges programmed with overlapping
bus ranges . */
pci_write_config_dword ( dev , PCI_PRIMARY_BUS ,
buses & ~ 0xffffff ) ;
[PATCH] PCI: Avoid leaving MASTER_ABORT disabled permanently when returning from pci_scan_bridge.
> On Mon, Feb 13, 2006 at 05:13:21PM -0800, David S. Miller wrote:
> >
> > In drivers/pci/probe.c:pci_scan_bridge(), if this is not the first
> > pass (pass != 0) we don't restore the PCI_BRIDGE_CONTROL_REGISTER and
> > thus leave PCI_BRIDGE_CTL_MASTER_ABORT off:
> >
> > int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass)
> > {
> > ...
> > /* Disable MasterAbortMode during probing to avoid reporting
> > of bus errors (in some architectures) */
> > pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
> > pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
> > bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
> > ...
> > if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus) {
> > unsigned int cmax, busnr;
> > /*
> > * Bus already configured by firmware, process it in the first
> > * pass and just note the configuration.
> > */
> > if (pass)
> > return max;
> > ...
> > }
> >
> > pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
> > ...
> >
> > This doesn't seem intentional.
Agreed, looks like an accident. The patch [1] originally came from Kip
Walker (Broadcom back then) between 2.6.0-test3 and 2.6.0-test4. As I
recall it was supposed to fix an issue with with PCI aborts being
signalled by the PCI bridge of the Broadcom BCM1250 family of SOCs when
probing behind pci_scan_bridge. It is undeseriable to disable
PCI_BRIDGE_CTL_MASTER_ABORT in pci_{read,write)_config_* and the
behaviour wasn't considered a bug in need of a workaround, so this was
put in probe.c.
I don't have an affected system at hand, so can't really test but I
propose something like the below patch.
[1] http://www.linux-mips.org/git?p=linux.git;a=commit;h=599457e0cb702a31a3247ea6a5d9c6c99c4cf195
[PCI] Avoid leaving MASTER_ABORT disabled permanently when returning from pci_scan_bridge.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2006-02-14 19:23:57 +03:00
goto out ;
2005-09-23 08:06:31 +04:00
}
2005-04-17 02:20:36 +04:00
/* Clear errors */
pci_write_config_word ( dev , PCI_STATUS , 0xffff ) ;
2014-09-19 20:56:06 +04:00
/* Prevent assigning a bus number that already exists.
* This can happen when a bridge is hot - plugged , so in
* this case we only re - scan this bus . */
2011-06-02 07:02:50 +04:00
child = pci_find_bus ( pci_domain_nr ( bus ) , max + 1 ) ;
if ( ! child ) {
2014-01-24 00:59:21 +04:00
child = pci_add_new_bus ( bus , dev , max + 1 ) ;
2011-06-02 07:02:50 +04:00
if ( ! child )
goto out ;
Revert "PCI: Make sure bus number resources stay within their parents bounds"
This reverts commit 1820ffdccb9b ("PCI: Make sure bus number resources stay
within their parents bounds") because it breaks some systems with LSI Logic
FC949ES Fibre Channel Adapters, apparently by exposing a defect in those
adapters.
Dirk tested a Tyan VX50 (B4985) with this device that worked like this
prior to 1820ffdccb9b:
bus: [bus 00-7f] on node 0 link 1
ACPI: PCI Root Bridge [PCI0] (domain 0000 [bus 00-07])
pci 0000:00:0e.0: PCI bridge to [bus 0a]
pci_bus 0000:0a: busn_res: can not insert [bus 0a] under [bus 00-07] (conflicts with (null) [bus 00-07])
pci 0000:0a:00.0: [1000:0646] type 00 class 0x0c0400 (FC adapter)
Note that the root bridge [bus 00-07] aperture is wrong; this is a BIOS
defect in the PCI0 _CRS method. But prior to 1820ffdccb9b, we didn't
enforce that aperture, and the FC adapter worked fine at 0a:00.0.
After 1820ffdccb9b, we notice that 00:0e.0's aperture is not contained in
the root bridge's aperture, so we reconfigure it so it *is* contained:
pci 0000:00:0e.0: bridge configuration invalid ([bus 0a-0a]), reconfiguring
pci 0000:00:0e.0: PCI bridge to [bus 06-07]
This effectively moves the FC device from 0a:00.0 to 07:00.0, which should
be legal. But when we enumerate bus 06, the FC device doesn't respond, so
we don't find anything. This is probably a defect in the FC device.
Possible fixes (due to Yinghai):
1) Add a quirk to fix the _CRS information based on what amd_bus.c read
from the hardware
2) Reset the FC device after we change its bus number
3) Revert 1820ffdccb9b
Fix 1 would be relatively easy, but it does sweep the LSI FC issue under
the rug. We might want to reconfigure bus numbers in the future for some
other reason, e.g., hotplug, and then we could trip over this again.
For that reason, I like fix 2, but we don't know whether it actually works,
and we don't have a patch for it yet.
This revert is fix 3, which also sweeps the LSI FC issue under the rug.
Link: https://bugzilla.kernel.org/show_bug.cgi?id=84281
Reported-by: Dirk Gouders <dirk@gouders.net>
Tested-by: Dirk Gouders <dirk@gouders.net>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
CC: stable@vger.kernel.org # v3.15+
CC: Yinghai Lu <yinghai@kernel.org>
2014-09-19 21:08:40 +04:00
pci_bus_insert_busn_res ( child , max + 1 , 0xff ) ;
2011-06-02 07:02:50 +04:00
}
2014-01-24 00:59:21 +04:00
max + + ;
2005-04-17 02:20:36 +04:00
buses = ( buses & 0xff000000 )
| ( ( unsigned int ) ( child - > primary ) < < 0 )
2012-05-18 05:51:11 +04:00
| ( ( unsigned int ) ( child - > busn_res . start ) < < 8 )
| ( ( unsigned int ) ( child - > busn_res . end ) < < 16 ) ;
2005-04-17 02:20:36 +04:00
/*
* yenta . c forces a secondary latency timer of 176.
* Copy that behaviour here .
*/
if ( is_cardbus ) {
buses & = ~ 0xff000000 ;
buses | = CARDBUS_LATENCY_TIMER < < 24 ;
}
2011-01-24 23:14:33 +03:00
2005-04-17 02:20:36 +04:00
/*
* We need to blast all three values with a single write .
*/
pci_write_config_dword ( dev , PCI_PRIMARY_BUS , buses ) ;
if ( ! is_cardbus ) {
2007-10-09 03:24:16 +04:00
child - > bridge_ctl = bctl ;
2005-04-17 02:20:36 +04:00
max = pci_scan_child_bus ( child ) ;
} else {
/*
* For CardBus bridges , we leave 4 bus numbers
* as cards with a PCI - to - PCI bridge can be
* inserted later .
*/
2014-04-19 04:13:49 +04:00
for ( i = 0 ; i < CARDBUS_RESERVE_BUSNR ; i + + ) {
2005-12-08 18:53:12 +03:00
struct pci_bus * parent = bus ;
2005-04-28 11:25:47 +04:00
if ( pci_find_bus ( pci_domain_nr ( bus ) ,
max + i + 1 ) )
break ;
2005-12-08 18:53:12 +03:00
while ( parent - > parent ) {
if ( ( ! pcibios_assign_all_busses ( ) ) & &
2012-05-18 05:51:11 +04:00
( parent - > busn_res . end > max ) & &
( parent - > busn_res . end < = max + i ) ) {
2005-12-08 18:53:12 +03:00
j = 1 ;
}
parent = parent - > parent ;
}
if ( j ) {
/*
* Often , there are two cardbus bridges
* - - try to leave one valid bus number
* for each one .
*/
i / = 2 ;
break ;
}
}
2005-04-28 11:25:47 +04:00
max + = i ;
2005-04-17 02:20:36 +04:00
}
/*
* Set the subordinate bus number to its real value .
*/
2012-05-18 05:51:13 +04:00
pci_bus_update_busn_res_end ( child , max ) ;
2005-04-17 02:20:36 +04:00
pci_write_config_byte ( dev , PCI_SUBORDINATE_BUS , max ) ;
}
2008-02-09 01:00:52 +03:00
sprintf ( child - > name ,
( is_cardbus ? " PCI CardBus %04x:%02x " : " PCI Bus %04x:%02x " ) ,
pci_domain_nr ( bus ) , child - > number ) ;
2005-04-17 02:20:36 +04:00
PCI: lets kill the 'PCI hidden behind bridge' message
Adrian Bunk wrote:
> Alois Nešpor wrote
>> PCI: Bus #0b (-#0e) is hidden behind transparent bridge #0a (-#0b) (try 'pci=assign-busses')
>> Please report the result to linux-kernel to fix this permanently"
>>
>> dmesg:
>> "Yenta: Raising subordinate bus# of parent bus (#0a) from #0b to #0e"
>> without pci=assign-busses and nothing with pci=assign-busses.
>
> Bernhard?
Ok, lets kill the message. As Alois Nešpor also saw, that's fixed up by Yenta,
so PCI does not have to warn about it. PCI could still warn about it if
is_cardbus is 0 in that instance of pci_scan_bridge(), but so far I have
not seen a report where this would have been the case so I think we can
spare the kernel of that check (removes ~300 lines of asm) unless debugging
is done.
History: The whole check was added in the days before we had the fixup
for this in Yenta and pci=assign-busses was the only way to get CardBus
cards detected on many (not all) of the machines which give this warning.
In theory, there could be cases when this warning would be triggered and
it's not cardbus, then the warning should still apply, but I think this
should only be the case when working on a completely broken PCI setup,
but one may have already enabled the debug code in drivers/pci and the
patched check would then trigger.
I do not sign this off yet because it's completely untested so far, but
everyone is free to test it (with the #ifdef DEBUG replaced by #if 1 and
pr_debug( changed to printk(.
We may also dump the whole check (remove everything within the #ifdef from
the source) if that's preferred.
On Alois Nešpor's machine this would then (only when debugging) this message:
"PCI: Bus #0b (-#0e) is partially hidden behind transparent bridge #0a (-#0b)"
"partially" should be in the message on his machine because #0b of #0b-#0e
is reachable behind #0a-#0b, but not #0c-#0e.
But that differentiation is now moot anyway because the fixup in Yenta takes
care of it as far as I could see so far, which means that unless somebody
is debugging a totally broken PCI setup, this message is not needed anymore,
not even for debugging PCI.
Ok, here the patch with the following changes:
* Refined to say that the bus is only partially hidden when the parent
bus numbers are not totally way off (outside of) the child bus range
* remove the reference to pci=assign-busses and the plea to report it
We could add a pure source code-only comment to keep a reference to
pci=assign-busses the in case when this is triggered by someone who
is debugging the cause of this message and looking the way to solve it.
From: Bernhard Kaindl <bk@suse.de>
Cc: stable <stable@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2007-07-30 22:35:13 +04:00
/* Has only triggered on CardBus, fixup is in yenta_socket */
2005-12-08 18:53:12 +03:00
while ( bus - > parent ) {
2012-05-18 05:51:11 +04:00
if ( ( child - > busn_res . end > bus - > busn_res . end ) | |
( child - > number > bus - > busn_res . end ) | |
2005-12-08 18:53:12 +03:00
( child - > number < bus - > number ) | |
2012-05-18 05:51:11 +04:00
( child - > busn_res . end < bus - > number ) ) {
2014-04-19 04:13:50 +04:00
dev_info ( & child - > dev , " %pR %s hidden behind%s bridge %s %pR \n " ,
2012-05-18 05:51:11 +04:00
& child - > busn_res ,
( bus - > number > child - > busn_res . end & &
bus - > busn_res . end < child - > number ) ?
2007-11-20 04:48:29 +03:00
" wholly " : " partially " ,
bus - > self - > transparent ? " transparent " : " " ,
2009-11-04 20:32:57 +03:00
dev_name ( & bus - > dev ) ,
2012-05-18 05:51:11 +04:00
& bus - > busn_res ) ;
2005-12-08 18:53:12 +03:00
}
bus = bus - > parent ;
}
[PATCH] PCI: Avoid leaving MASTER_ABORT disabled permanently when returning from pci_scan_bridge.
> On Mon, Feb 13, 2006 at 05:13:21PM -0800, David S. Miller wrote:
> >
> > In drivers/pci/probe.c:pci_scan_bridge(), if this is not the first
> > pass (pass != 0) we don't restore the PCI_BRIDGE_CONTROL_REGISTER and
> > thus leave PCI_BRIDGE_CTL_MASTER_ABORT off:
> >
> > int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass)
> > {
> > ...
> > /* Disable MasterAbortMode during probing to avoid reporting
> > of bus errors (in some architectures) */
> > pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
> > pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
> > bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
> > ...
> > if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus) {
> > unsigned int cmax, busnr;
> > /*
> > * Bus already configured by firmware, process it in the first
> > * pass and just note the configuration.
> > */
> > if (pass)
> > return max;
> > ...
> > }
> >
> > pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
> > ...
> >
> > This doesn't seem intentional.
Agreed, looks like an accident. The patch [1] originally came from Kip
Walker (Broadcom back then) between 2.6.0-test3 and 2.6.0-test4. As I
recall it was supposed to fix an issue with PCI aborts being
signalled by the PCI bridge of the Broadcom BCM1250 family of SOCs when
probing behind pci_scan_bridge. It is undesirable to disable
PCI_BRIDGE_CTL_MASTER_ABORT in pci_{read,write)_config_* and the
behaviour wasn't considered a bug in need of a workaround, so this was
put in probe.c.
I don't have an affected system at hand, so can't really test but I
propose something like the below patch.
[1] http://www.linux-mips.org/git?p=linux.git;a=commit;h=599457e0cb702a31a3247ea6a5d9c6c99c4cf195
[PCI] Avoid leaving MASTER_ABORT disabled permanently when returning from pci_scan_bridge.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2006-02-14 19:23:57 +03:00
out :
pci_write_config_word ( dev , PCI_BRIDGE_CONTROL , bctl ) ;
2005-04-17 02:20:36 +04:00
return max ;
}
2014-04-26 00:32:25 +04:00
EXPORT_SYMBOL ( pci_scan_bridge ) ;
2005-04-17 02:20:36 +04:00
/*
* Read interrupt line and base address registers .
* The architecture - dependent code can tweak these , of course .
*/
static void pci_read_irq ( struct pci_dev * dev )
{
unsigned char irq ;
pci_read_config_byte ( dev , PCI_INTERRUPT_PIN , & irq ) ;
2005-11-03 03:24:32 +03:00
dev - > pin = irq ;
2005-04-17 02:20:36 +04:00
if ( irq )
pci_read_config_byte ( dev , PCI_INTERRUPT_LINE , & irq ) ;
dev - > irq = irq ;
}
2010-01-26 20:10:03 +03:00
void set_pcie_port_type ( struct pci_dev * pdev )
2009-03-20 06:25:14 +03:00
{
int pos ;
u16 reg16 ;
PCI: Add dev->has_secondary_link to track downstream PCIe links
A PCIe Port is an interface to a Link. A Root Port is a PCI-PCI bridge in
a Root Complex and has a Link on its secondary (downstream) side. For
other Ports, the Link may be on either the upstream (closer to the Root
Complex) or downstream side of the Port.
The usual topology has a Root Port connected to an Upstream Port. We
previously assumed this was the only possible topology, and that a
Downstream Port's Link was always on its downstream side, like this:
+---------------------+
+------+ | Downstream |
| Root | | Upstream Port +--Link--
| Port +--Link--+ Port |
+------+ | Downstream |
| Port +--Link--
+---------------------+
But systems do exist (see URL below) where the Root Port is connected to a
Downstream Port. In this case, a Downstream Port's Link may be on either
the upstream or downstream side:
+---------------------+
+------+ | Upstream |
| Root | | Downstream Port +--Link--
| Port +--Link--+ Port |
+------+ | Downstream |
| Port +--Link--
+---------------------+
We can't use the Port type to determine which side the Link is on, so add a
bit in struct pci_dev to keep track.
A Root Port's Link is always on the Port's secondary side. A component
(Endpoint or Port) on the other end of the Link obviously has the Link on
its upstream side. If that component is a Port, it is part of a Switch or
a Bridge. A Bridge has a PCI or PCI-X bus on its secondary side, not a
Link. The internal bus of a Switch connects the Port to another Port whose
Link is on the downstream side.
[bhelgaas: changelog, comment, cache "type", use if/else]
Link: http://lkml.kernel.org/r/54EB81B2.4050904@pobox.com
Link: https://bugzilla.kernel.org/show_bug.cgi?id=94361
Suggested-by: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: Yijing Wang <wangyijing@huawei.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
2015-05-21 10:05:02 +03:00
int type ;
struct pci_dev * parent ;
2009-03-20 06:25:14 +03:00
pos = pci_find_capability ( pdev , PCI_CAP_ID_EXP ) ;
if ( ! pos )
return ;
2009-11-05 06:05:11 +03:00
pdev - > pcie_cap = pos ;
2009-03-20 06:25:14 +03:00
pci_read_config_word ( pdev , pos + PCI_EXP_FLAGS , & reg16 ) ;
2012-07-24 13:20:02 +04:00
pdev - > pcie_flags_reg = reg16 ;
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
pci_read_config_word ( pdev , pos + PCI_EXP_DEVCAP , & reg16 ) ;
pdev - > pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD ;
PCI: Add dev->has_secondary_link to track downstream PCIe links
A PCIe Port is an interface to a Link. A Root Port is a PCI-PCI bridge in
a Root Complex and has a Link on its secondary (downstream) side. For
other Ports, the Link may be on either the upstream (closer to the Root
Complex) or downstream side of the Port.
The usual topology has a Root Port connected to an Upstream Port. We
previously assumed this was the only possible topology, and that a
Downstream Port's Link was always on its downstream side, like this:
+---------------------+
+------+ | Downstream |
| Root | | Upstream Port +--Link--
| Port +--Link--+ Port |
+------+ | Downstream |
| Port +--Link--
+---------------------+
But systems do exist (see URL below) where the Root Port is connected to a
Downstream Port. In this case, a Downstream Port's Link may be on either
the upstream or downstream side:
+---------------------+
+------+ | Upstream |
| Root | | Downstream Port +--Link--
| Port +--Link--+ Port |
+------+ | Downstream |
| Port +--Link--
+---------------------+
We can't use the Port type to determine which side the Link is on, so add a
bit in struct pci_dev to keep track.
A Root Port's Link is always on the Port's secondary side. A component
(Endpoint or Port) on the other end of the Link obviously has the Link on
its upstream side. If that component is a Port, it is part of a Switch or
a Bridge. A Bridge has a PCI or PCI-X bus on its secondary side, not a
Link. The internal bus of a Switch connects the Port to another Port whose
Link is on the downstream side.
[bhelgaas: changelog, comment, cache "type", use if/else]
Link: http://lkml.kernel.org/r/54EB81B2.4050904@pobox.com
Link: https://bugzilla.kernel.org/show_bug.cgi?id=94361
Suggested-by: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: Yijing Wang <wangyijing@huawei.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
2015-05-21 10:05:02 +03:00
/*
* A Root Port is always the upstream end of a Link . No PCIe
* component has two Links . Two Links are connected by a Switch
* that has a Port on each Link and internal logic to connect the
* two Ports .
*/
type = pci_pcie_type ( pdev ) ;
if ( type = = PCI_EXP_TYPE_ROOT_PORT )
pdev - > has_secondary_link = 1 ;
else if ( type = = PCI_EXP_TYPE_UPSTREAM | |
type = = PCI_EXP_TYPE_DOWNSTREAM ) {
parent = pci_upstream_bridge ( pdev ) ;
PCI: Tolerate hierarchies with no Root Port
We should not assume any particular hardware topology. Commit d0751b98dfa3
("PCI: Add dev->has_secondary_link to track downstream PCIe links") relied
on the assumption that every PCIe hierarchy is rooted at a Root Port. But
we can't rely on any assumption about what hardware we will find; we just
have to deal with the world as it is.
On some platforms, PCIe devices (endpoints, switch upstream ports, etc.)
appear directly on the root bus, and there is no Root Port in the PCI bus
hierarchy. For example, Meelis observed these top-level devices on a
Sparc V245:
0000:02:00.0 PCI bridge to [bus 03-0d] Switch Upstream Port
0001:02:00.0 PCI bridge to [bus 03] PCIe to PCI/PCI-X Bridge
These devices *look* like they have links going upstream, but there really
are no upstream devices.
In set_pcie_port_type(), we used the parent device to figure out which side
of a switch port has a link, so if the parent device did not exist, we
dereferenced a NULL parent pointer.
Check whether the parent device exists before dereferencing it.
Meelis observed this oops on Sparc V245 and T2000. Ben Herrenschmidt says
this is also possible on IBM PowerVM guests on PowerPC.
[bhelgaas: changelog, comment]
Link: http://lkml.kernel.org/r/alpine.LRH.2.20.1508122118210.18637@math.ut.ee
Reported-by: Meelis Roos <mroos@linux.ee>
Tested-by: Meelis Roos <mroos@linux.ee>
Signed-off-by: Yijing Wang <wangyijing@huawei.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Acked-by: David S. Miller <davem@davemloft.net>
2015-08-17 13:47:58 +03:00
/*
* Usually there ' s an upstream device ( Root Port or Switch
* Downstream Port ) , but we can ' t assume one exists .
*/
if ( parent & & ! parent - > has_secondary_link )
PCI: Add dev->has_secondary_link to track downstream PCIe links
A PCIe Port is an interface to a Link. A Root Port is a PCI-PCI bridge in
a Root Complex and has a Link on its secondary (downstream) side. For
other Ports, the Link may be on either the upstream (closer to the Root
Complex) or downstream side of the Port.
The usual topology has a Root Port connected to an Upstream Port. We
previously assumed this was the only possible topology, and that a
Downstream Port's Link was always on its downstream side, like this:
+---------------------+
+------+ | Downstream |
| Root | | Upstream Port +--Link--
| Port +--Link--+ Port |
+------+ | Downstream |
| Port +--Link--
+---------------------+
But systems do exist (see URL below) where the Root Port is connected to a
Downstream Port. In this case, a Downstream Port's Link may be on either
the upstream or downstream side:
+---------------------+
+------+ | Upstream |
| Root | | Downstream Port +--Link--
| Port +--Link--+ Port |
+------+ | Downstream |
| Port +--Link--
+---------------------+
We can't use the Port type to determine which side the Link is on, so add a
bit in struct pci_dev to keep track.
A Root Port's Link is always on the Port's secondary side. A component
(Endpoint or Port) on the other end of the Link obviously has the Link on
its upstream side. If that component is a Port, it is part of a Switch or
a Bridge. A Bridge has a PCI or PCI-X bus on its secondary side, not a
Link. The internal bus of a Switch connects the Port to another Port whose
Link is on the downstream side.
[bhelgaas: changelog, comment, cache "type", use if/else]
Link: http://lkml.kernel.org/r/54EB81B2.4050904@pobox.com
Link: https://bugzilla.kernel.org/show_bug.cgi?id=94361
Suggested-by: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: Yijing Wang <wangyijing@huawei.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
2015-05-21 10:05:02 +03:00
pdev - > has_secondary_link = 1 ;
}
2009-03-20 06:25:14 +03:00
}
2010-01-26 20:10:03 +03:00
void set_pcie_hotplug_bridge ( struct pci_dev * pdev )
2009-09-10 01:09:24 +04:00
{
u32 reg32 ;
2012-07-24 13:20:06 +04:00
pcie_capability_read_dword ( pdev , PCI_EXP_SLTCAP , & reg32 ) ;
2009-09-10 01:09:24 +04:00
if ( reg32 & PCI_EXP_SLTCAP_HPC )
pdev - > is_hotplug_bridge = 1 ;
}
2014-05-06 00:20:51 +04:00
/**
 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
 * @dev: PCI device
 *
 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
 * when forwarding a type1 configuration request the bridge must check that
 * the extended register address field is zero.  The bridge is not permitted
 * to forward the transactions and must handle it as an Unsupported Request.
 * Some bridges do not follow this rule and simply drop the extended register
 * bits, resulting in the standard config space being aliased, every 256
 * bytes across the entire configuration space.  Test for this condition by
 * comparing the first dword of each potential alias to the vendor/device ID.
 * Known offenders:
 *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
 *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
 */
static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_QUIRKS
	u32 id, dw;
	int off;

	/* Dword 0 is the vendor/device ID; every alias would mirror it */
	pci_read_config_dword(dev, PCI_VENDOR_ID, &id);

	for (off = PCI_CFG_SPACE_SIZE; off < PCI_CFG_SPACE_EXP_SIZE;
	     off += PCI_CFG_SPACE_SIZE) {
		if (pci_read_config_dword(dev, off, &dw) != PCIBIOS_SUCCESSFUL)
			return false;
		if (dw != id)
			return false;
	}

	return true;
#else
	return false;
#endif
}
2014-01-11 04:14:48 +04:00
/**
* pci_cfg_space_size - get the configuration space size of the PCI device .
* @ dev : PCI device
*
* Regular PCI devices have 256 bytes , but PCI - X 2 and PCI Express devices
* have 4096 bytes . Even if the device is capable , that doesn ' t mean we can
* access it . Maybe we don ' t have a way to generate extended config space
* accesses , or the device is behind a reverse Express bridge . So we try
* reading the dword at 0x100 which must either be 0 or a valid extended
* capability header .
*/
static int pci_cfg_space_size_ext ( struct pci_dev * dev )
{
u32 status ;
int pos = PCI_CFG_SPACE_SIZE ;
if ( pci_read_config_dword ( dev , pos , & status ) ! = PCIBIOS_SUCCESSFUL )
goto fail ;
2014-05-06 00:20:51 +04:00
if ( status = = 0xffffffff | | pci_ext_cfg_is_aliased ( dev ) )
2014-01-11 04:14:48 +04:00
goto fail ;
return PCI_CFG_SPACE_EXP_SIZE ;
fail :
return PCI_CFG_SPACE_SIZE ;
}
int pci_cfg_space_size ( struct pci_dev * dev )
{
int pos ;
u32 status ;
u16 class ;
class = dev - > class > > 8 ;
if ( class = = PCI_CLASS_BRIDGE_HOST )
return pci_cfg_space_size_ext ( dev ) ;
if ( ! pci_is_pcie ( dev ) ) {
pos = pci_find_capability ( dev , PCI_CAP_ID_PCIX ) ;
if ( ! pos )
goto fail ;
pci_read_config_dword ( dev , pos + PCI_X_STATUS , & status ) ;
if ( ! ( status & ( PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ ) ) )
goto fail ;
}
return pci_cfg_space_size_ext ( dev ) ;
fail :
return PCI_CFG_SPACE_SIZE ;
}
2007-04-24 01:19:36 +04:00
# define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
2006-12-30 03:47:29 +03:00
2015-05-07 17:52:21 +03:00
static void pci_msi_setup_pci_dev ( struct pci_dev * dev )
{
/*
* Disable the MSI hardware to avoid screaming interrupts
* during boot . This is the power on reset default so
* usually this should be a noop .
*/
dev - > msi_cap = pci_find_capability ( dev , PCI_CAP_ID_MSI ) ;
if ( dev - > msi_cap )
pci_msi_set_enable ( dev , 0 ) ;
dev - > msix_cap = pci_find_capability ( dev , PCI_CAP_ID_MSIX ) ;
if ( dev - > msix_cap )
pci_msix_clear_and_set_ctrl ( dev , PCI_MSIX_FLAGS_ENABLE , 0 ) ;
}
2005-04-17 02:20:36 +04:00
/**
* pci_setup_device - fill in class and map information of a device
* @ dev : the device structure to fill
*
2013-11-14 22:28:18 +04:00
* Initialize the device structure with information about the device ' s
2005-04-17 02:20:36 +04:00
* vendor , class , memory and IO - space addresses , IRQ lines etc .
* Called at initialisation of the PCI subsystem and by CardBus services .
2009-03-20 06:25:14 +03:00
* Returns 0 on success and negative if unknown type of device ( not normal ,
* bridge or CardBus ) .
2005-04-17 02:20:36 +04:00
*/
2009-03-20 06:25:14 +03:00
int pci_setup_device ( struct pci_dev * dev )
2005-04-17 02:20:36 +04:00
{
u32 class ;
2009-03-20 06:25:14 +03:00
u8 hdr_type ;
struct pci_slot * slot ;
2009-10-06 19:45:19 +04:00
int pos = 0 ;
2012-02-24 07:19:00 +04:00
struct pci_bus_region region ;
struct resource * res ;
2009-03-20 06:25:14 +03:00
if ( pci_read_config_byte ( dev , PCI_HEADER_TYPE , & hdr_type ) )
return - EIO ;
dev - > sysdata = dev - > bus - > sysdata ;
dev - > dev . parent = dev - > bus - > bridge ;
dev - > dev . bus = & pci_bus_type ;
dev - > hdr_type = hdr_type & 0x7f ;
dev - > multifunction = ! ! ( hdr_type & 0x80 ) ;
dev - > error_state = pci_channel_io_normal ;
set_pcie_port_type ( dev ) ;
list_for_each_entry ( slot , & dev - > bus - > slots , list )
if ( PCI_SLOT ( dev - > devfn ) = = slot - > number )
dev - > slot = slot ;
/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
set this higher , assuming the system even supports it . */
dev - > dma_mask = 0xffffffff ;
2005-04-17 02:20:36 +04:00
2008-07-03 00:24:49 +04:00
dev_set_name ( & dev - > dev , " %04x:%02x:%02x.%d " , pci_domain_nr ( dev - > bus ) ,
dev - > bus - > number , PCI_SLOT ( dev - > devfn ) ,
PCI_FUNC ( dev - > devfn ) ) ;
2005-04-17 02:20:36 +04:00
pci_read_config_dword ( dev , PCI_CLASS_REVISION , & class ) ;
2007-06-09 02:46:30 +04:00
dev - > revision = class & 0xff ;
2012-02-20 02:50:12 +04:00
dev - > class = class > > 8 ; /* upper 3 bytes */
2005-04-17 02:20:36 +04:00
2012-02-20 02:50:12 +04:00
dev_printk ( KERN_DEBUG , & dev - > dev , " [%04x:%04x] type %02x class %#08x \n " ,
dev - > vendor , dev - > device , dev - > hdr_type , dev - > class ) ;
2005-04-17 02:20:36 +04:00
2009-03-21 17:05:11 +03:00
/* need to have dev->class ready */
dev - > cfg_size = pci_cfg_space_size ( dev ) ;
2005-04-17 02:20:36 +04:00
/* "Unknown power state" */
2005-08-18 02:32:19 +04:00
dev - > current_state = PCI_UNKNOWN ;
2005-04-17 02:20:36 +04:00
2015-05-07 17:52:21 +03:00
pci_msi_setup_pci_dev ( dev ) ;
2005-04-17 02:20:36 +04:00
/* Early fixups, before probing the BARs */
pci_fixup_device ( pci_fixup_early , dev ) ;
2009-05-27 20:25:05 +04:00
/* device class may be changed after fixup */
class = dev - > class > > 8 ;
2005-04-17 02:20:36 +04:00
switch ( dev - > hdr_type ) { /* header type */
case PCI_HEADER_TYPE_NORMAL : /* standard header */
if ( class = = PCI_CLASS_BRIDGE_PCI )
goto bad ;
pci_read_irq ( dev ) ;
pci_read_bases ( dev , 6 , PCI_ROM_ADDRESS ) ;
pci_read_config_word ( dev , PCI_SUBSYSTEM_VENDOR_ID , & dev - > subsystem_vendor ) ;
pci_read_config_word ( dev , PCI_SUBSYSTEM_ID , & dev - > subsystem_device ) ;
2006-10-04 03:41:26 +04:00
/*
2014-03-06 01:07:03 +04:00
* Do the ugly legacy mode stuff here rather than broken chip
* quirk code . Legacy mode ATA controllers have fixed
* addresses . These are not always echoed in BAR0 - 3 , and
* BAR0 - 3 in a few cases contain junk !
2006-10-04 03:41:26 +04:00
*/
if ( class = = PCI_CLASS_STORAGE_IDE ) {
u8 progif ;
pci_read_config_byte ( dev , PCI_CLASS_PROG , & progif ) ;
if ( ( progif & 1 ) = = 0 ) {
2012-02-24 07:19:00 +04:00
region . start = 0x1F0 ;
region . end = 0x1F7 ;
res = & dev - > resource [ 0 ] ;
res - > flags = LEGACY_IO_RESOURCE ;
PCI: Convert pcibios_resource_to_bus() to take a pci_bus, not a pci_dev
These interfaces:
pcibios_resource_to_bus(struct pci_dev *dev, *bus_region, *resource)
pcibios_bus_to_resource(struct pci_dev *dev, *resource, *bus_region)
took a pci_dev, but they really depend only on the pci_bus. And we want to
use them in resource allocation paths where we have the bus but not a
device, so this patch converts them to take the pci_bus instead of the
pci_dev:
pcibios_resource_to_bus(struct pci_bus *bus, *bus_region, *resource)
pcibios_bus_to_resource(struct pci_bus *bus, *resource, *bus_region)
In fact, with standard PCI-PCI bridges, they only depend on the host
bridge, because that's the only place address translation occurs, but
we aren't going that far yet.
[bhelgaas: changelog]
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
2013-12-10 10:54:40 +04:00
pcibios_bus_to_resource ( dev - > bus , res , & region ) ;
2014-03-06 01:07:03 +04:00
dev_info ( & dev - > dev , " legacy IDE quirk: reg 0x10: %pR \n " ,
res ) ;
2012-02-24 07:19:00 +04:00
region . start = 0x3F6 ;
region . end = 0x3F6 ;
res = & dev - > resource [ 1 ] ;
res - > flags = LEGACY_IO_RESOURCE ;
PCI: Convert pcibios_resource_to_bus() to take a pci_bus, not a pci_dev
These interfaces:
pcibios_resource_to_bus(struct pci_dev *dev, *bus_region, *resource)
pcibios_bus_to_resource(struct pci_dev *dev, *resource, *bus_region)
took a pci_dev, but they really depend only on the pci_bus. And we want to
use them in resource allocation paths where we have the bus but not a
device, so this patch converts them to take the pci_bus instead of the
pci_dev:
pcibios_resource_to_bus(struct pci_bus *bus, *bus_region, *resource)
pcibios_bus_to_resource(struct pci_bus *bus, *resource, *bus_region)
In fact, with standard PCI-PCI bridges, they only depend on the host
bridge, because that's the only place address translation occurs, but
we aren't going that far yet.
[bhelgaas: changelog]
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
2013-12-10 10:54:40 +04:00
pcibios_bus_to_resource ( dev - > bus , res , & region ) ;
2014-03-06 01:07:03 +04:00
dev_info ( & dev - > dev , " legacy IDE quirk: reg 0x14: %pR \n " ,
res ) ;
2006-10-04 03:41:26 +04:00
}
if ( ( progif & 4 ) = = 0 ) {
2012-02-24 07:19:00 +04:00
region . start = 0x170 ;
region . end = 0x177 ;
res = & dev - > resource [ 2 ] ;
res - > flags = LEGACY_IO_RESOURCE ;
PCI: Convert pcibios_resource_to_bus() to take a pci_bus, not a pci_dev
These interfaces:
pcibios_resource_to_bus(struct pci_dev *dev, *bus_region, *resource)
pcibios_bus_to_resource(struct pci_dev *dev, *resource, *bus_region)
took a pci_dev, but they really depend only on the pci_bus. And we want to
use them in resource allocation paths where we have the bus but not a
device, so this patch converts them to take the pci_bus instead of the
pci_dev:
pcibios_resource_to_bus(struct pci_bus *bus, *bus_region, *resource)
pcibios_bus_to_resource(struct pci_bus *bus, *resource, *bus_region)
In fact, with standard PCI-PCI bridges, they only depend on the host
bridge, because that's the only place address translation occurs, but
we aren't going that far yet.
[bhelgaas: changelog]
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
2013-12-10 10:54:40 +04:00
pcibios_bus_to_resource ( dev - > bus , res , & region ) ;
2014-03-06 01:07:03 +04:00
dev_info ( & dev - > dev , " legacy IDE quirk: reg 0x18: %pR \n " ,
res ) ;
2012-02-24 07:19:00 +04:00
region . start = 0x376 ;
region . end = 0x376 ;
res = & dev - > resource [ 3 ] ;
res - > flags = LEGACY_IO_RESOURCE ;
PCI: Convert pcibios_resource_to_bus() to take a pci_bus, not a pci_dev
These interfaces:
pcibios_resource_to_bus(struct pci_dev *dev, *bus_region, *resource)
pcibios_bus_to_resource(struct pci_dev *dev, *resource, *bus_region)
took a pci_dev, but they really depend only on the pci_bus. And we want to
use them in resource allocation paths where we have the bus but not a
device, so this patch converts them to take the pci_bus instead of the
pci_dev:
pcibios_resource_to_bus(struct pci_bus *bus, *bus_region, *resource)
pcibios_bus_to_resource(struct pci_bus *bus, *resource, *bus_region)
In fact, with standard PCI-PCI bridges, they only depend on the host
bridge, because that's the only place address translation occurs, but
we aren't going that far yet.
[bhelgaas: changelog]
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
2013-12-10 10:54:40 +04:00
pcibios_bus_to_resource ( dev - > bus , res , & region ) ;
2014-03-06 01:07:03 +04:00
dev_info ( & dev - > dev , " legacy IDE quirk: reg 0x1c: %pR \n " ,
res ) ;
2006-10-04 03:41:26 +04:00
}
}
2005-04-17 02:20:36 +04:00
break ;
case PCI_HEADER_TYPE_BRIDGE : /* bridge header */
if ( class ! = PCI_CLASS_BRIDGE_PCI )
goto bad ;
/* The PCI-to-PCI bridge spec requires that subtractive
decoding ( i . e . transparent ) bridge must have programming
2013-11-14 22:28:18 +04:00
interface code of 0x01 . */
2005-11-03 03:55:49 +03:00
pci_read_irq ( dev ) ;
2005-04-17 02:20:36 +04:00
dev - > transparent = ( ( dev - > class & 0xff ) = = 1 ) ;
pci_read_bases ( dev , 2 , PCI_ROM_ADDRESS1 ) ;
2009-09-10 01:09:24 +04:00
set_pcie_hotplug_bridge ( dev ) ;
2009-10-06 19:45:19 +04:00
pos = pci_find_capability ( dev , PCI_CAP_ID_SSVID ) ;
if ( pos ) {
pci_read_config_word ( dev , pos + PCI_SSVID_VENDOR_ID , & dev - > subsystem_vendor ) ;
pci_read_config_word ( dev , pos + PCI_SSVID_DEVICE_ID , & dev - > subsystem_device ) ;
}
2005-04-17 02:20:36 +04:00
break ;
case PCI_HEADER_TYPE_CARDBUS : /* CardBus bridge header */
if ( class ! = PCI_CLASS_BRIDGE_CARDBUS )
goto bad ;
pci_read_irq ( dev ) ;
pci_read_bases ( dev , 1 , 0 ) ;
pci_read_config_word ( dev , PCI_CB_SUBSYSTEM_VENDOR_ID , & dev - > subsystem_vendor ) ;
pci_read_config_word ( dev , PCI_CB_SUBSYSTEM_ID , & dev - > subsystem_device ) ;
break ;
default : /* unknown header */
2014-04-19 04:13:50 +04:00
dev_err ( & dev - > dev , " unknown header type %02x, ignoring device \n " ,
dev - > hdr_type ) ;
2009-03-20 06:25:14 +03:00
return - EIO ;
2005-04-17 02:20:36 +04:00
bad :
2014-04-19 04:13:50 +04:00
dev_err ( & dev - > dev , " ignoring class %#08x (doesn't match header type %02x) \n " ,
dev - > class , dev - > hdr_type ) ;
2005-04-17 02:20:36 +04:00
dev - > class = PCI_CLASS_NOT_DEFINED ;
}
/* We found a fine healthy device, go go go... */
return 0 ;
}
2014-09-13 06:02:00 +04:00
static struct hpp_type0 pci_default_type0 = {
. revision = 1 ,
. cache_line_size = 8 ,
. latency_timer = 0x40 ,
. enable_serr = 0 ,
. enable_perr = 0 ,
} ;
static void program_hpp_type0 ( struct pci_dev * dev , struct hpp_type0 * hpp )
{
u16 pci_cmd , pci_bctl ;
2014-08-30 04:10:19 +04:00
if ( ! hpp )
2014-09-13 06:02:00 +04:00
hpp = & pci_default_type0 ;
if ( hpp - > revision > 1 ) {
dev_warn ( & dev - > dev ,
" PCI settings rev %d not supported; using defaults \n " ,
hpp - > revision ) ;
hpp = & pci_default_type0 ;
}
pci_write_config_byte ( dev , PCI_CACHE_LINE_SIZE , hpp - > cache_line_size ) ;
pci_write_config_byte ( dev , PCI_LATENCY_TIMER , hpp - > latency_timer ) ;
pci_read_config_word ( dev , PCI_COMMAND , & pci_cmd ) ;
if ( hpp - > enable_serr )
pci_cmd | = PCI_COMMAND_SERR ;
if ( hpp - > enable_perr )
pci_cmd | = PCI_COMMAND_PARITY ;
pci_write_config_word ( dev , PCI_COMMAND , pci_cmd ) ;
/* Program bridge control value */
if ( ( dev - > class > > 8 ) = = PCI_CLASS_BRIDGE_PCI ) {
pci_write_config_byte ( dev , PCI_SEC_LATENCY_TIMER ,
hpp - > latency_timer ) ;
pci_read_config_word ( dev , PCI_BRIDGE_CONTROL , & pci_bctl ) ;
if ( hpp - > enable_serr )
pci_bctl | = PCI_BRIDGE_CTL_SERR ;
if ( hpp - > enable_perr )
pci_bctl | = PCI_BRIDGE_CTL_PARITY ;
pci_write_config_word ( dev , PCI_BRIDGE_CONTROL , pci_bctl ) ;
}
}
static void program_hpp_type1 ( struct pci_dev * dev , struct hpp_type1 * hpp )
{
if ( hpp )
dev_warn ( & dev - > dev , " PCI-X settings not supported \n " ) ;
}
static void program_hpp_type2 ( struct pci_dev * dev , struct hpp_type2 * hpp )
{
int pos ;
u32 reg32 ;
if ( ! hpp )
return ;
if ( hpp - > revision > 1 ) {
dev_warn ( & dev - > dev , " PCIe settings rev %d not supported \n " ,
hpp - > revision ) ;
return ;
}
2014-09-03 23:26:29 +04:00
/*
* Don ' t allow _HPX to change MPS or MRRS settings . We manage
* those to make sure they ' re consistent with the rest of the
* platform .
*/
hpp - > pci_exp_devctl_and | = PCI_EXP_DEVCTL_PAYLOAD |
PCI_EXP_DEVCTL_READRQ ;
hpp - > pci_exp_devctl_or & = ~ ( PCI_EXP_DEVCTL_PAYLOAD |
PCI_EXP_DEVCTL_READRQ ) ;
2014-09-13 06:02:00 +04:00
/* Initialize Device Control Register */
pcie_capability_clear_and_set_word ( dev , PCI_EXP_DEVCTL ,
~ hpp - > pci_exp_devctl_and , hpp - > pci_exp_devctl_or ) ;
/* Initialize Link Control Register */
2014-11-11 23:09:46 +03:00
if ( pcie_cap_has_lnkctl ( dev ) )
2014-09-13 06:02:00 +04:00
pcie_capability_clear_and_set_word ( dev , PCI_EXP_LNKCTL ,
~ hpp - > pci_exp_lnkctl_and , hpp - > pci_exp_lnkctl_or ) ;
/* Find Advanced Error Reporting Enhanced Capability */
pos = pci_find_ext_capability ( dev , PCI_EXT_CAP_ID_ERR ) ;
if ( ! pos )
return ;
/* Initialize Uncorrectable Error Mask Register */
pci_read_config_dword ( dev , pos + PCI_ERR_UNCOR_MASK , & reg32 ) ;
reg32 = ( reg32 & hpp - > unc_err_mask_and ) | hpp - > unc_err_mask_or ;
pci_write_config_dword ( dev , pos + PCI_ERR_UNCOR_MASK , reg32 ) ;
/* Initialize Uncorrectable Error Severity Register */
pci_read_config_dword ( dev , pos + PCI_ERR_UNCOR_SEVER , & reg32 ) ;
reg32 = ( reg32 & hpp - > unc_err_sever_and ) | hpp - > unc_err_sever_or ;
pci_write_config_dword ( dev , pos + PCI_ERR_UNCOR_SEVER , reg32 ) ;
/* Initialize Correctable Error Mask Register */
pci_read_config_dword ( dev , pos + PCI_ERR_COR_MASK , & reg32 ) ;
reg32 = ( reg32 & hpp - > cor_err_mask_and ) | hpp - > cor_err_mask_or ;
pci_write_config_dword ( dev , pos + PCI_ERR_COR_MASK , reg32 ) ;
/* Initialize Advanced Error Capabilities and Control Register */
pci_read_config_dword ( dev , pos + PCI_ERR_CAP , & reg32 ) ;
reg32 = ( reg32 & hpp - > adv_err_cap_and ) | hpp - > adv_err_cap_or ;
pci_write_config_dword ( dev , pos + PCI_ERR_CAP , reg32 ) ;
/*
* FIXME : The following two registers are not supported yet .
*
* o Secondary Uncorrectable Error Severity Register
* o Secondary Uncorrectable Error Mask Register
*/
}
PCI: Add pci_configure_device() during enumeration
Some platforms can tell the OS how to configure PCI devices, e.g., how to
set cache line size, error reporting enables, etc. ACPI defines _HPP and
_HPX methods for this purpose.
This configuration was previously done by some of the hotplug drivers using
pci_configure_slot(). But not all hotplug drivers did this, and per the
spec (ACPI rev 5.0, sec 6.2.7), we can also do it for "devices not
configured by the BIOS at system boot."
Move this configuration into the PCI core by adding pci_configure_device()
and calling it from pci_device_add(), so we do this for all devices as we
enumerate them.
This is based on pci_configure_slot(), which is used by hotplug drivers.
I omitted:
- pcie_bus_configure_settings() because it configures MPS and MRRS, which
requires global knowledge of the fabric and must be done later, and
- configuration of subordinate devices; that will happen when we call
pci_device_add() for those devices.
Because pci_configure_slot() was only done by hotplug drivers, this initial
version of pci_configure_device() only configures hot-added devices,
ignoring anything added during boot.
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
2014-08-28 00:29:47 +04:00
static void pci_configure_device ( struct pci_dev * dev )
{
struct hotplug_params hpp ;
int ret ;
memset ( & hpp , 0 , sizeof ( hpp ) ) ;
ret = pci_get_hp_params ( dev , & hpp ) ;
if ( ret )
return ;
program_hpp_type2 ( dev , hpp . t2 ) ;
program_hpp_type1 ( dev , hpp . t1 ) ;
program_hpp_type0 ( dev , hpp . t0 ) ;
}
2008-10-13 15:49:55 +04:00
/*
 * pci_release_capabilities - free per-capability state attached to a device
 *
 * Counterpart of pci_init_capabilities(); called from the device release
 * path.  Frees VPD state, SR-IOV state, and the saved-capability buffers.
 */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
2005-04-17 02:20:36 +04:00
/**
* pci_release_dev - free a pci device structure when all users of it are finished .
* @ dev : device that ' s been disconnected
*
* Will be called only by the device core when all users of this pci device are
* done .
*/
static void pci_release_dev ( struct device * dev )
{
2014-02-01 18:38:29 +04:00
struct pci_dev * pci_dev ;
2005-04-17 02:20:36 +04:00
2014-02-01 18:38:29 +04:00
pci_dev = to_pci_dev ( dev ) ;
2008-10-13 15:49:55 +04:00
pci_release_capabilities ( pci_dev ) ;
2011-04-11 05:37:07 +04:00
pci_release_of_node ( pci_dev ) ;
2013-06-04 21:18:14 +04:00
pcibios_release_device ( pci_dev ) ;
2013-05-25 17:48:31 +04:00
pci_bus_put ( pci_dev - > bus ) ;
PCI: Introduce new device binding path using pci_dev.driver_override
The driver_override field allows us to specify the driver for a device
rather than relying on the driver to provide a positive match of the
device. This shortcuts the existing process of looking up the vendor and
device ID, adding them to the driver new_id, binding the device, then
removing the ID, but it also provides a couple advantages.
First, the above existing process allows the driver to bind to any device
matching the new_id for the window where it's enabled. This is often not
desired, such as the case of trying to bind a single device to a meta
driver like pci-stub or vfio-pci. Using driver_override we can do this
deterministically using:
echo pci-stub > /sys/bus/pci/devices/0000:03:00.0/driver_override
echo 0000:03:00.0 > /sys/bus/pci/devices/0000:03:00.0/driver/unbind
echo 0000:03:00.0 > /sys/bus/pci/drivers_probe
Previously we could not invoke drivers_probe after adding a device to
new_id for a driver as we get non-deterministic behavior whether the driver
we intend or the standard driver will claim the device. Now it becomes a
deterministic process, only the driver matching driver_override will probe
the device.
To return the device to the standard driver, we simply clear the
driver_override and reprobe the device:
echo > /sys/bus/pci/devices/0000:03:00.0/driver_override
echo 0000:03:00.0 > /sys/bus/pci/devices/0000:03:00.0/driver/unbind
echo 0000:03:00.0 > /sys/bus/pci/drivers_probe
Another advantage to this approach is that we can specify a driver override
to force a specific binding or prevent any binding. For instance when an
IOMMU group is exposed to userspace through VFIO we require that all
devices within that group are owned by VFIO. However, devices can be
hot-added into an IOMMU group, in which case we want to prevent the device
from binding to any driver (override driver = "none") or perhaps have it
automatically bind to vfio-pci. With driver_override it's a simple matter
for this field to be set internally when the device is first discovered to
prevent driver matches.
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Alexander Graf <agraf@suse.de>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-05-20 18:53:21 +04:00
kfree ( pci_dev - > driver_override ) ;
2005-04-17 02:20:36 +04:00
kfree ( pci_dev ) ;
}
2013-05-25 17:48:30 +04:00
struct pci_dev * pci_alloc_dev ( struct pci_bus * bus )
2007-04-05 11:19:08 +04:00
{
struct pci_dev * dev ;
dev = kzalloc ( sizeof ( struct pci_dev ) , GFP_KERNEL ) ;
if ( ! dev )
return NULL ;
INIT_LIST_HEAD ( & dev - > bus_list ) ;
2013-04-08 07:05:07 +04:00
dev - > dev . type = & pci_dev_type ;
2013-05-25 17:48:30 +04:00
dev - > bus = pci_bus_get ( bus ) ;
2007-04-05 11:19:08 +04:00
return dev ;
}
2013-05-25 17:48:30 +04:00
EXPORT_SYMBOL ( pci_alloc_dev ) ;
2012-01-27 22:55:10 +04:00
/*
 * pci_bus_read_dev_vendor_id - read vendor/device ID, waiting out CRS
 * @bus: bus to probe
 * @devfn: device/function number
 * @l: filled with the raw vendor/device dword on success
 * @crs_timeout: max milliseconds to retry while the device returns
 *		 Configuration Request Retry Status; 0 means don't wait
 *
 * Returns true when a plausible vendor ID was read, false if the slot is
 * empty, returns a known-bogus pattern, or never comes out of CRS.
 */
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/*
	 * Configuration Request Retry Status.  Some root ports return the
	 * actual device ID instead of the synthetic ID (0xFFFF) required
	 * by the PCIe spec.  Ignore the device ID and only check for
	 * (vendor id == 1).
	 */
	while ((*l & 0xffff) == 0x0001) {
		if (!crs_timeout)
			return false;

		/* Exponential back-off between retries */
		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
			       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
			       PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
/*
* Read the config data for a PCI device , sanity - check it
* and fill in the dev structure . . .
*/
static struct pci_dev * pci_scan_device ( struct pci_bus * bus , int devfn )
{
struct pci_dev * dev ;
u32 l ;
if ( ! pci_bus_read_dev_vendor_id ( bus , devfn , & l , 60 * 1000 ) )
return NULL ;
2013-05-25 17:48:31 +04:00
dev = pci_alloc_dev ( bus ) ;
2005-04-17 02:20:36 +04:00
if ( ! dev )
return NULL ;
dev - > devfn = devfn ;
dev - > vendor = l & 0xffff ;
dev - > device = ( l > > 16 ) & 0xffff ;
2008-09-02 19:40:51 +04:00
2011-04-11 05:37:07 +04:00
pci_set_of_node ( dev ) ;
2009-03-20 06:25:14 +03:00
if ( pci_setup_device ( dev ) ) {
2013-05-25 17:48:31 +04:00
pci_bus_put ( dev - > bus ) ;
2005-04-17 02:20:36 +04:00
kfree ( dev ) ;
return NULL ;
}
return dev ;
}
2008-10-13 15:49:55 +04:00
/*
 * pci_init_capabilities - set up capability-related state for a new device
 *
 * Order mirrors pci_release_capabilities() plus the non-released items:
 * MSI/MSI-X lists, save buffers, PM, VPD, ARI, SR-IOV, and ACS.
 */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X list */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);
}
2007-03-27 09:53:30 +04:00
void pci_device_add ( struct pci_dev * dev , struct pci_bus * bus )
2005-04-17 02:20:36 +04:00
{
2013-01-22 01:20:52 +04:00
int ret ;
PCI: Add pci_configure_device() during enumeration
Some platforms can tell the OS how to configure PCI devices, e.g., how to
set cache line size, error reporting enables, etc. ACPI defines _HPP and
_HPX methods for this purpose.
This configuration was previously done by some of the hotplug drivers using
pci_configure_slot(). But not all hotplug drivers did this, and per the
spec (ACPI rev 5.0, sec 6.2.7), we can also do it for "devices not
configured by the BIOS at system boot."
Move this configuration into the PCI core by adding pci_configure_device()
and calling it from pci_device_add(), so we do this for all devices as we
enumerate them.
This is based on pci_configure_slot(), which is used by hotplug drivers.
I omitted:
- pcie_bus_configure_settings() because it configures MPS and MRRS, which
requires global knowledge of the fabric and must be done later, and
- configuration of subordinate devices; that will happen when we call
pci_device_add() for those devices.
Because pci_configure_slot() was only done by hotplug drivers, this initial
version of pci_configure_device() only configures hot-added devices,
ignoring anything added during boot.
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
2014-08-28 00:29:47 +04:00
pci_configure_device ( dev ) ;
2005-09-06 03:31:03 +04:00
device_initialize ( & dev - > dev ) ;
dev - > dev . release = pci_release_dev ;
2005-04-17 02:20:36 +04:00
2013-01-22 01:20:44 +04:00
set_dev_node ( & dev - > dev , pcibus_to_node ( bus ) ) ;
2005-09-06 03:31:03 +04:00
dev - > dev . dma_mask = & dev - > dma_mask ;
2008-02-05 09:27:55 +03:00
dev - > dev . dma_parms = & dev - > dma_parms ;
2005-09-06 03:31:03 +04:00
dev - > dev . coherent_dma_mask = 0xffffffffull ;
2015-03-03 20:52:13 +03:00
of_pci_dma_configure ( dev ) ;
2005-04-17 02:20:36 +04:00
2008-02-05 09:27:55 +03:00
pci_set_dma_max_seg_size ( dev , 65536 ) ;
2008-02-05 09:28:14 +03:00
pci_set_dma_seg_boundary ( dev , 0xffffffff ) ;
2008-02-05 09:27:55 +03:00
2005-04-17 02:20:36 +04:00
/* Fix up broken headers */
pci_fixup_device ( pci_fixup_header , dev ) ;
2012-02-16 09:40:31 +04:00
/* moved out from quirk header fixup code */
pci_reassigndev_resource_alignment ( dev ) ;
2009-09-10 01:49:59 +04:00
/* Clear the state_saved flag. */
dev - > state_saved = false ;
2008-10-13 15:49:55 +04:00
/* Initialize various capabilities */
pci_init_capabilities ( dev ) ;
2008-07-07 05:34:48 +04:00
2005-04-17 02:20:36 +04:00
/*
* Add the device to our list of discovered devices
* and the bus list for fixup functions , etc .
*/
2006-06-02 08:35:43 +04:00
down_write ( & pci_bus_sem ) ;
2005-04-17 02:20:36 +04:00
list_add_tail ( & dev - > bus_list , & bus - > devices ) ;
2006-06-02 08:35:43 +04:00
up_write ( & pci_bus_sem ) ;
2013-01-22 01:20:52 +04:00
ret = pcibios_add_device ( dev ) ;
WARN_ON ( ret < 0 ) ;
/* Notifier could use PCI capabilities */
dev - > match_driver = false ;
ret = device_add ( & dev - > dev ) ;
WARN_ON ( ret < 0 ) ;
2005-09-06 03:31:03 +04:00
}
2014-04-15 02:11:40 +04:00
struct pci_dev * pci_scan_single_device ( struct pci_bus * bus , int devfn )
2005-09-06 03:31:03 +04:00
{
struct pci_dev * dev ;
2009-03-20 23:56:00 +03:00
dev = pci_get_slot ( bus , devfn ) ;
if ( dev ) {
pci_dev_put ( dev ) ;
return dev ;
}
2005-09-06 03:31:03 +04:00
dev = pci_scan_device ( bus , devfn ) ;
if ( ! dev )
return NULL ;
pci_device_add ( dev , bus ) ;
2005-04-17 02:20:36 +04:00
return dev ;
}
2007-11-22 02:07:11 +03:00
EXPORT_SYMBOL ( pci_scan_single_device ) ;
2005-04-17 02:20:36 +04:00
2013-01-25 20:12:31 +04:00
static unsigned next_fn ( struct pci_bus * bus , struct pci_dev * dev , unsigned fn )
2009-12-13 16:10:02 +03:00
{
2013-01-25 20:12:31 +04:00
int pos ;
u16 cap = 0 ;
unsigned next_fn ;
2010-01-18 00:01:41 +03:00
2013-01-25 20:12:31 +04:00
if ( pci_ari_enabled ( bus ) ) {
if ( ! dev )
return 0 ;
pos = pci_find_ext_capability ( dev , PCI_EXT_CAP_ID_ARI ) ;
if ( ! pos )
return 0 ;
2010-01-18 00:01:41 +03:00
2013-01-25 20:12:31 +04:00
pci_read_config_word ( dev , pos + PCI_ARI_CAP , & cap ) ;
next_fn = PCI_ARI_CAP_NFN ( cap ) ;
if ( next_fn < = fn )
return 0 ; /* protect against malformed list */
2009-12-13 16:10:02 +03:00
2013-01-25 20:12:31 +04:00
return next_fn ;
}
/* dev may be NULL for non-contiguous multifunction devices */
if ( ! dev | | dev - > multifunction )
return ( fn + 1 ) % 8 ;
2009-12-13 16:10:02 +03:00
return 0 ;
}
static int only_one_child ( struct pci_bus * bus )
{
struct pci_dev * parent = bus - > self ;
2012-05-01 01:21:02 +04:00
2009-12-13 16:10:02 +03:00
if ( ! parent | | ! pci_is_pcie ( parent ) )
return 0 ;
2012-07-24 13:20:03 +04:00
if ( pci_pcie_type ( parent ) = = PCI_EXP_TYPE_ROOT_PORT )
2012-05-01 01:21:02 +04:00
return 1 ;
2015-05-21 10:05:04 +03:00
if ( parent - > has_secondary_link & &
2012-05-01 01:21:02 +04:00
! pci_has_flag ( PCI_SCAN_ALL_PCIE_DEVS ) )
2009-12-13 16:10:02 +03:00
return 1 ;
return 0 ;
}
2005-04-17 02:20:36 +04:00
/**
* pci_scan_slot - scan a PCI slot on a bus for devices .
* @ bus : PCI bus to scan
* @ devfn : slot number to scan ( must have zero function . )
*
* Scan a PCI slot on the specified PCI bus for devices , adding
* discovered devices to the @ bus - > devices list . New devices
2008-02-15 01:56:56 +03:00
* will not have is_added set .
2009-03-20 23:56:05 +03:00
*
* Returns the number of new devices found .
2005-04-17 02:20:36 +04:00
*/
2007-03-27 09:53:30 +04:00
int pci_scan_slot ( struct pci_bus * bus , int devfn )
2005-04-17 02:20:36 +04:00
{
2009-12-13 16:10:02 +03:00
unsigned fn , nr = 0 ;
2009-03-20 23:56:05 +03:00
struct pci_dev * dev ;
2009-12-13 16:10:02 +03:00
if ( only_one_child ( bus ) & & ( devfn > 0 ) )
return 0 ; /* Already scanned the entire slot */
2005-04-17 02:20:36 +04:00
2009-03-20 23:56:05 +03:00
dev = pci_scan_single_device ( bus , devfn ) ;
2010-01-18 00:01:41 +03:00
if ( ! dev )
return 0 ;
if ( ! dev - > is_added )
2009-03-20 23:56:05 +03:00
nr + + ;
2013-01-25 20:12:31 +04:00
for ( fn = next_fn ( bus , dev , 0 ) ; fn > 0 ; fn = next_fn ( bus , dev , fn ) ) {
2009-12-13 16:10:02 +03:00
dev = pci_scan_single_device ( bus , devfn + fn ) ;
if ( dev ) {
if ( ! dev - > is_added )
nr + + ;
dev - > multifunction = 1 ;
2005-04-17 02:20:36 +04:00
}
}
PCI: add PCI Express ASPM support
PCI Express ASPM defines a protocol for PCI Express components in the D0
state to reduce Link power by placing their Links into a low power state
and instructing the other end of the Link to do likewise. This
capability allows hardware-autonomous, dynamic Link power reduction
beyond what is achievable by software-only controlled power management.
However, The device should be configured by software appropriately.
Enabling ASPM will save power, but will introduce device latency.
This patch adds ASPM support in Linux. It introduces a global policy for
ASPM, a sysfs file /sys/module/pcie_aspm/parameters/policy can control
it. The interface can be used as a boot option too. Currently we have
below setting:
-default, BIOS default setting
-powersave, highest power saving mode, enable all available ASPM
state and clock power management
-performance, highest performance, disable ASPM and clock power
management
By default, the 'default' policy is used currently.
In my test, power difference between powersave mode and performance mode
is about 1.3w in a system with 3 PCIE links.
Note: some devices might not work well with aspm, either because chipset
issue or device issue. The patch provide API (pci_disable_link_state),
driver can disable ASPM for specific device.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2008-02-25 04:46:41 +03:00
2008-07-23 06:32:31 +04:00
/* only one slot has pcie device */
if ( bus - > self & & nr )
PCI: add PCI Express ASPM support
PCI Express ASPM defines a protocol for PCI Express components in the D0
state to reduce Link power by placing their Links into a low power state
and instructing the other end of the Link to do likewise. This
capability allows hardware-autonomous, dynamic Link power reduction
beyond what is achievable by software-only controlled power management.
However, The device should be configured by software appropriately.
Enabling ASPM will save power, but will introduce device latency.
This patch adds ASPM support in Linux. It introduces a global policy for
ASPM, a sysfs file /sys/module/pcie_aspm/parameters/policy can control
it. The interface can be used as a boot option too. Currently we have
below setting:
-default, BIOS default setting
-powersave, highest power saving mode, enable all available ASPM
state and clock power management
-performance, highest performance, disable ASPM and clock power
management
By default, the 'default' policy is used currently.
In my test, power difference between powersave mode and performance mode
is about 1.3w in a system with 3 PCIE links.
Note: some devices might not work well with aspm, either because chipset
issue or device issue. The patch provide API (pci_disable_link_state),
driver can disable ASPM for specific device.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2008-02-25 04:46:41 +03:00
pcie_aspm_init_link_state ( bus - > self ) ;
2005-04-17 02:20:36 +04:00
return nr ;
}
2014-04-26 00:32:25 +04:00
EXPORT_SYMBOL ( pci_scan_slot ) ;
2005-04-17 02:20:36 +04:00
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
static int pcie_find_smpss ( struct pci_dev * dev , void * data )
{
u8 * smpss = data ;
if ( ! pci_is_pcie ( dev ) )
return 0 ;
PCI: Don't restrict MPS for slots below Root Ports
When booting with "pci=pcie_bus_safe", we previously limited the
fabric MPS to 128 when we found:
(1) A hotplug-capable Downstream Port ("dev->is_hotplug_bridge &&
pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT"), or
(2) A hotplug-capable Root Port with a slot that was either empty or
contained a multi-function device ("dev->is_hotplug_bridge &&
!list_is_singular(&dev->bus->devices)")
Part (1) is valid, but part (2) is not.
After a hot-add in the slot below a Root Port, we can reconfigure all
MPS values in the fabric below the Root Port because the new device is
the only thing below the Root Port and there are no active drivers.
Therefore, there's no reason to limit the MPS for Root Ports, no
matter what's in the slot.
Test info:
-+-[0000:40]-+-07.0-[0000:46]--+-00.0 Intel 82576 NIC
\-00.1 Intel 82576 NIC
0000:40:07.0 Root Port bridge to [bus 46] (MPS supported=256)
0000:46:00.0 Endpoint (MPS supported=512)
0000:46:00.1 Endpoint (MPS supported=512)
# echo 0 > /sys/bus/pci/slots/7/power
# echo 1 > /sys/bus/pci/slots/7/power
pcieport 0000:40:07.0: PCI-E Max Payload Size set to 256/ 256 (was 256)
pci 0000:46:00.0: PCI-E Max Payload Size set to 256/ 512 (was 128)
pci 0000:46:00.1: PCI-E Max Payload Size set to 256/ 512 (was 128)
Before this change, we set MPS to 128 for the Root Port and both NICs
because the slot contained a multi-function device and
dev->is_hotplug_bridge && !list_is_singular(&dev->bus->devices)
was true. After this change, we set it to 256.
[bhelgaas: changelog, comments, split out upstream bridge check]
Signed-off-by: Yijing Wang <wangyijing@huawei.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Cc: Jon Mason <jdmason@kudzu.us>
2013-08-22 07:24:47 +04:00
/*
* We don ' t have a way to change MPS settings on devices that have
* drivers attached . A hot - added device might support only the minimum
* MPS setting ( MPS = 128 ) . Therefore , if the fabric contains a bridge
* where devices may be hot - added , we limit the fabric MPS to 128 so
* hot - added devices will work correctly .
*
* However , if we hot - add a device to a slot directly below a Root
* Port , it ' s impossible for there to be other existing devices below
* the port . We don ' t limit the MPS in this case because we can
* reconfigure MPS on both the Root Port and the hot - added device ,
* and there are no other devices involved .
*
* Note that this PCIE_BUS_SAFE path assumes no peer - to - peer DMA .
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
*/
PCI: Don't restrict MPS for slots below Root Ports
When booting with "pci=pcie_bus_safe", we previously limited the
fabric MPS to 128 when we found:
(1) A hotplug-capable Downstream Port ("dev->is_hotplug_bridge &&
pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT"), or
(2) A hotplug-capable Root Port with a slot that was either empty or
contained a multi-function device ("dev->is_hotplug_bridge &&
!list_is_singular(&dev->bus->devices)")
Part (1) is valid, but part (2) is not.
After a hot-add in the slot below a Root Port, we can reconfigure all
MPS values in the fabric below the Root Port because the new device is
the only thing below the Root Port and there are no active drivers.
Therefore, there's no reason to limit the MPS for Root Ports, no
matter what's in the slot.
Test info:
-+-[0000:40]-+-07.0-[0000:46]--+-00.0 Intel 82576 NIC
\-00.1 Intel 82576 NIC
0000:40:07.0 Root Port bridge to [bus 46] (MPS supported=256)
0000:46:00.0 Endpoint (MPS supported=512)
0000:46:00.1 Endpoint (MPS supported=512)
# echo 0 > /sys/bus/pci/slots/7/power
# echo 1 > /sys/bus/pci/slots/7/power
pcieport 0000:40:07.0: PCI-E Max Payload Size set to 256/ 256 (was 256)
pci 0000:46:00.0: PCI-E Max Payload Size set to 256/ 512 (was 128)
pci 0000:46:00.1: PCI-E Max Payload Size set to 256/ 512 (was 128)
Before this change, we set MPS to 128 for the Root Port and both NICs
because the slot contained a multi-function device and
dev->is_hotplug_bridge && !list_is_singular(&dev->bus->devices)
was true. After this change, we set it to 256.
[bhelgaas: changelog, comments, split out upstream bridge check]
Signed-off-by: Yijing Wang <wangyijing@huawei.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Cc: Jon Mason <jdmason@kudzu.us>
2013-08-22 07:24:47 +04:00
if ( dev - > is_hotplug_bridge & &
pci_pcie_type ( dev ) ! = PCI_EXP_TYPE_ROOT_PORT )
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sent by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce the performance of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
* smpss = 0 ;
if ( * smpss > dev - > pcie_mpss )
* smpss = dev - > pcie_mpss ;
return 0 ;
}
static void pcie_write_mps ( struct pci_dev * dev , int mps )
{
2011-10-14 23:56:14 +04:00
int rc ;
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
if ( pcie_bus_config = = PCIE_BUS_PERFORMANCE ) {
2011-10-14 23:56:14 +04:00
mps = 128 < < dev - > pcie_mpss ;
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
2012-07-24 13:20:03 +04:00
if ( pci_pcie_type ( dev ) ! = PCI_EXP_TYPE_ROOT_PORT & &
dev - > bus - > self )
2011-10-14 23:56:14 +04:00
/* For "Performance", the assumption is made that
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
* downstream communication will never be larger than
* the MRRS . So , the MPS only needs to be configured
* for the upstream communication . This being the case ,
* walk from the top down and set the MPS of the child
* to that of the parent bus .
2011-10-14 23:56:14 +04:00
*
* Configure the device MPS with the smaller of the
* device MPSS or the bridge MPS ( which is assumed to be
* properly configured at this point to the largest
* allowable MPS based on its parent bus ) .
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
*/
2011-10-14 23:56:14 +04:00
mps = min ( mps , pcie_get_mps ( dev - > bus - > self ) ) ;
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
}
rc = pcie_set_mps ( dev , mps ) ;
if ( rc )
dev_err ( & dev - > dev , " Failed attempting to set the MPS \n " ) ;
}
2011-10-14 23:56:14 +04:00
static void pcie_write_mrrs ( struct pci_dev * dev )
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sent by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
{
2011-10-14 23:56:14 +04:00
int rc , mrrs ;
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
2011-09-09 01:41:18 +04:00
/* In the "safe" case, do not configure the MRRS. There appear to be
* issues with setting MRRS to 0 on a number of devices .
*/
if ( pcie_bus_config ! = PCIE_BUS_PERFORMANCE )
return ;
/* For Max performance, the MRRS must be set to the largest supported
* value . However , it cannot be configured larger than the MPS the
2011-10-14 23:56:14 +04:00
* device or the bus can support . This should already be properly
* configured by a prior call to pcie_write_mps .
2011-09-09 01:41:18 +04:00
*/
2011-10-14 23:56:14 +04:00
mrrs = pcie_get_mps ( dev ) ;
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
/* MRRS is a R/W register. Invalid values can be written, but a
2011-09-09 01:41:18 +04:00
* subsequent read will verify if the value is acceptable or not .
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
* If the MRRS value provided is not acceptable ( e . g . , too large ) ,
* shrink the value until it is acceptable to the HW .
2013-11-14 22:28:18 +04:00
*/
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
while ( mrrs ! = pcie_get_readrq ( dev ) & & mrrs > = 128 ) {
rc = pcie_set_readrq ( dev , mrrs ) ;
2011-10-14 23:56:14 +04:00
if ( ! rc )
break ;
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
2011-10-14 23:56:14 +04:00
dev_warn ( & dev - > dev , " Failed attempting to set the MRRS \n " ) ;
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
mrrs / = 2 ;
}
2011-10-14 23:56:14 +04:00
if ( mrrs < 128 )
2014-04-19 04:13:50 +04:00
dev_err ( & dev - > dev , " MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe \n " ) ;
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
}
2013-08-26 12:33:06 +04:00
static void pcie_bus_detect_mps ( struct pci_dev * dev )
{
struct pci_dev * bridge = dev - > bus - > self ;
int mps , p_mps ;
if ( ! bridge )
return ;
mps = pcie_get_mps ( dev ) ;
p_mps = pcie_get_mps ( bridge ) ;
if ( mps ! = p_mps )
dev_warn ( & dev - > dev , " Max Payload Size %d, but upstream %s set to %d; if necessary, use \" pci=pcie_bus_safe \" and report a bug \n " ,
mps , pci_name ( bridge ) , p_mps ) ;
}
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
static int pcie_bus_configure_set ( struct pci_dev * dev , void * data )
{
2011-10-14 23:56:16 +04:00
int mps , orig_mps ;
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
if ( ! pci_is_pcie ( dev ) )
return 0 ;
2013-08-26 12:33:06 +04:00
if ( pcie_bus_config = = PCIE_BUS_TUNE_OFF ) {
pcie_bus_detect_mps ( dev ) ;
return 0 ;
}
2011-10-14 23:56:16 +04:00
mps = 128 < < * ( u8 * ) data ;
orig_mps = pcie_get_mps ( dev ) ;
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
pcie_write_mps ( dev , mps ) ;
2011-10-14 23:56:14 +04:00
pcie_write_mrrs ( dev ) ;
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
2014-04-19 04:13:50 +04:00
dev_info ( & dev - > dev , " Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d \n " ,
pcie_get_mps ( dev ) , 128 < < dev - > pcie_mpss ,
2011-10-14 23:56:16 +04:00
orig_mps , pcie_get_readrq ( dev ) ) ;
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
return 0 ;
}
2011-10-14 23:56:16 +04:00
/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
 * parents then children fashion.  If this changes, then this code will not
 * work as designed.
 */
2013-08-22 07:24:44 +04:00
void pcie_bus_configure_settings ( struct pci_bus * bus )
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
{
2014-04-29 22:51:55 +04:00
u8 smpss = 0 ;
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
2013-08-22 07:24:44 +04:00
if ( ! bus - > self )
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
return ;
if ( ! pci_is_pcie ( bus - > self ) )
2011-10-03 18:50:20 +04:00
return ;
/* FIXME - Peer to peer DMA is possible, though the endpoint would need
2013-08-26 12:33:05 +04:00
* to be aware of the MPS of the destination . To work around this ,
2011-10-03 18:50:20 +04:00
* simply force the MPS of the entire system to the smallest possible .
*/
if ( pcie_bus_config = = PCIE_BUS_PEER2PEER )
smpss = 0 ;
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
if ( pcie_bus_config = = PCIE_BUS_SAFE ) {
2013-08-22 07:24:44 +04:00
smpss = bus - > self - > pcie_mpss ;
2011-10-03 18:50:20 +04:00
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
pcie_find_smpss ( bus - > self , & smpss ) ;
pci_walk_bus ( bus , pcie_find_smpss , & smpss ) ;
}
pcie_bus_configure_set ( bus - > self , & smpss ) ;
pci_walk_bus ( bus , pcie_bus_configure_set , & smpss ) ;
}
2011-08-02 09:01:18 +04:00
EXPORT_SYMBOL_GPL ( pcie_bus_configure_settings ) ;
PCI: Set PCI-E Max Payload Size on fabric
On a given PCI-E fabric, each device, bridge, and root port can have a
different PCI-E maximum payload size. There is a sizable performance
boost for having the largest possible maximum payload size on each PCI-E
device. However, if improperly configured, fatal bus errors can occur.
Thus, it is important to ensure that PCI-E payloads sends by a device
are never larger than the MPS setting of all devices on the way to the
destination.
This can be achieved two ways:
- A conservative approach is to use the smallest common denominator of
the entire tree below a root complex for every device on that fabric.
This means for example that having a 128 bytes MPS USB controller on one
leg of a switch will dramatically reduce performances of a video card or
10GE adapter on another leg of that same switch.
It also means that any hierarchy supporting hotplug slots (including
expresscard or thunderbolt I suppose, dbl check that) will have to be
entirely clamped to 128 bytes since we cannot predict what will be
plugged into those slots, and we cannot change the MPS on a "live"
system.
- A more optimal way is possible, if it falls within a couple of
constraints:
* The top-level host bridge will never generate packets larger than the
smallest TLP (or if it can be controlled independently from its MPS at
least)
* The device will never generate packets larger than MPS (which can be
configured via MRRS)
* No support of direct PCI-E <-> PCI-E transfers between devices without
some additional code to specifically deal with that case
Then we can use an approach that basically ignores downstream requests
and focuses exclusively on upstream requests. In that case, all we need
to care about is that a device MPS is no larger than its parent MPS,
which allows us to keep all switches/bridges to the max MPS supported by
their parent and eventually the PHB.
In this case, your USB controller would no longer "starve" your 10GE
Ethernet and your hotplug slots won't affect your global MPS.
Additionally, the hotplugged devices themselves can be configured to a
larger MPS up to the value configured in the hotplug bridge.
To choose between the two available options, two PCI kernel boot args
have been added to the PCI calls. "pcie_bus_safe" will provide the
former behavior, while "pcie_bus_perf" will perform the latter behavior.
By default, the latter behavior is used.
NOTE: due to the location of the enablement, each arch will need to add
calls to this function. This patch only enables x86.
This patch includes a number of changes recommended by Benjamin
Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com
Signed-off-by: Jon Mason <mason@myri.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
2011-07-21 00:20:54 +04:00
2012-11-22 00:35:00 +04:00
unsigned int pci_scan_child_bus ( struct pci_bus * bus )
2005-04-17 02:20:36 +04:00
{
2012-05-18 05:51:11 +04:00
unsigned int devfn , pass , max = bus - > busn_res . start ;
2005-04-17 02:20:36 +04:00
struct pci_dev * dev ;
2009-11-04 20:32:52 +03:00
dev_dbg ( & bus - > dev , " scanning bus \n " ) ;
2005-04-17 02:20:36 +04:00
/* Go find them, Rover! */
for ( devfn = 0 ; devfn < 0x100 ; devfn + = 8 )
pci_scan_slot ( bus , devfn ) ;
2009-03-20 06:25:13 +03:00
/* Reserve buses for SR-IOV capability. */
max + = pci_iov_bus_range ( bus ) ;
2005-04-17 02:20:36 +04:00
/*
* After performing arch - dependent fixup of the bus , look behind
* all PCI - to - PCI bridges on this bus .
*/
2009-03-20 23:56:10 +03:00
if ( ! bus - > is_added ) {
2009-11-04 20:32:52 +03:00
dev_dbg ( & bus - > dev , " fixups for bus \n " ) ;
2009-03-20 23:56:10 +03:00
pcibios_fixup_bus ( bus ) ;
2013-04-12 09:44:16 +04:00
bus - > is_added = 1 ;
2009-03-20 23:56:10 +03:00
}
2014-04-19 04:13:49 +04:00
for ( pass = 0 ; pass < 2 ; pass + + )
2005-04-17 02:20:36 +04:00
list_for_each_entry ( dev , & bus - > devices , bus_list ) {
2014-05-04 08:23:38 +04:00
if ( pci_is_bridge ( dev ) )
2005-04-17 02:20:36 +04:00
max = pci_scan_bridge ( bus , dev , max , pass ) ;
}
/*
* We ' ve scanned the bus and so we know all about what ' s on
* the other side of any bridges that may be on this bus plus
* any devices .
*
* Return how far we ' ve got finding sub - buses .
*/
2009-11-04 20:32:52 +03:00
dev_dbg ( & bus - > dev , " bus scan returning with max=%02x \n " , max ) ;
2005-04-17 02:20:36 +04:00
return max ;
}
2014-04-26 00:32:25 +04:00
EXPORT_SYMBOL_GPL ( pci_scan_child_bus ) ;
2005-04-17 02:20:36 +04:00
ACPI / PCI: Set root bridge ACPI handle in advance
The ACPI handles of PCI root bridges need to be known to
acpi_bind_one(), so that it can create the appropriate
"firmware_node" and "physical_node" files for them, but currently
the way it gets to know those handles is not exactly straightforward
(to put it lightly).
This is how it works, roughly:
1. acpi_bus_scan() finds the handle of a PCI root bridge,
creates a struct acpi_device object for it and passes that
object to acpi_pci_root_add().
2. acpi_pci_root_add() creates a struct acpi_pci_root object,
populates its "device" field with its argument's address
(device->handle is the ACPI handle found in step 1).
3. The struct acpi_pci_root object created in step 2 is passed
to pci_acpi_scan_root() and used to get resources that are
passed to pci_create_root_bus().
4. pci_create_root_bus() creates a struct pci_host_bridge object
and passes its "dev" member to device_register().
5. platform_notify(), which for systems with ACPI is set to
acpi_platform_notify(), is called.
So far, so good. Now it starts to be "interesting".
6. acpi_find_bridge_device() is used to find the ACPI handle of
the given device (which is the PCI root bridge) and executes
acpi_pci_find_root_bridge(), among other things, for the
given device object.
7. acpi_pci_find_root_bridge() uses the name (sic!) of the given
device object to extract the segment and bus numbers of the PCI
root bridge and passes them to acpi_get_pci_rootbridge_handle().
8. acpi_get_pci_rootbridge_handle() browses the list of ACPI PCI
root bridges and finds the one that matches the given segment
and bus numbers. Its handle is then used to initialize the
ACPI handle of the PCI root bridge's device object by
acpi_bind_one(). However, this is *exactly* the ACPI handle we
started with in step 1.
Needless to say, this is quite embarassing, but it may be avoided
thanks to commit f3fd0c8 (ACPI: Allow ACPI handles of devices to be
initialized in advance), which makes it possible to initialize the
ACPI handle of a device before passing it to device_register().
Accordingly, add a new __weak routine, pcibios_root_bridge_prepare(),
defaulting to an empty implementation that can be replaced by the
interested architecutres (x86 and ia64 at the moment) with functions
that will set the root bridge's ACPI handle before its dev member is
passed to device_register(). Make both x86 and ia64 provide such
implementations of pcibios_root_bridge_prepare() and remove
acpi_pci_find_root_bridge() and acpi_get_pci_rootbridge_handle() that
aren't necessary any more.
Included is a fix for breakage on systems with non-ACPI PCI host
bridges from Bjorn Helgaas.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
2013-01-10 01:33:37 +04:00
/**
 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
 * @bridge: Host bridge to set up.
 *
 * Default empty implementation.  Replace with an architecture-specific setup
 * routine, if necessary.
 */
int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	return 0;
}
2013-04-12 09:44:20 +04:00
/* Arch hook invoked after a new bus is registered; default is a no-op. */
void __weak pcibios_add_bus(struct pci_bus *bus)
{
}
/* Arch hook invoked when a bus is removed; default is a no-op. */
void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
2011-10-29 02:25:45 +04:00
struct pci_bus * pci_create_root_bus ( struct device * parent , int bus ,
struct pci_ops * ops , void * sysdata , struct list_head * resources )
2005-04-17 02:20:36 +04:00
{
2012-02-24 07:19:00 +04:00
int error ;
2012-02-24 07:18:59 +04:00
struct pci_host_bridge * bridge ;
2009-11-04 20:32:52 +03:00
struct pci_bus * b , * b2 ;
2015-02-05 08:44:44 +03:00
struct resource_entry * window , * n ;
2011-10-29 02:25:40 +04:00
struct resource * res ;
2012-02-24 07:19:00 +04:00
resource_size_t offset ;
char bus_addr [ 64 ] ;
char * fmt ;
2005-04-17 02:20:36 +04:00
2014-09-29 18:29:26 +04:00
b = pci_alloc_bus ( NULL ) ;
2005-04-17 02:20:36 +04:00
if ( ! b )
2012-04-03 05:31:53 +04:00
return NULL ;
2005-04-17 02:20:36 +04:00
b - > sysdata = sysdata ;
b - > ops = ops ;
2013-01-22 01:20:52 +04:00
b - > number = b - > busn_res . start = bus ;
2014-09-29 18:29:26 +04:00
pci_bus_assign_domain_nr ( b , parent ) ;
2009-11-04 20:32:52 +03:00
b2 = pci_find_bus ( pci_domain_nr ( b ) , bus ) ;
if ( b2 ) {
2005-04-17 02:20:36 +04:00
/* If we already got to this bus through a different bridge, ignore it */
2009-11-04 20:32:52 +03:00
dev_dbg ( & b2 - > dev , " bus already known \n " ) ;
2005-04-17 02:20:36 +04:00
goto err_out ;
}
2006-06-02 08:35:43 +04:00
2012-04-03 05:31:53 +04:00
bridge = pci_alloc_host_bridge ( b ) ;
if ( ! bridge )
goto err_out ;
bridge - > dev . parent = parent ;
2013-06-08 02:16:51 +04:00
bridge - > dev . release = pci_release_host_bridge_dev ;
2012-04-03 05:31:53 +04:00
dev_set_name ( & bridge - > dev , " pci%04x:%02x " , pci_domain_nr ( b ) , bus ) ;
ACPI / PCI: Set root bridge ACPI handle in advance
The ACPI handles of PCI root bridges need to be known to
acpi_bind_one(), so that it can create the appropriate
"firmware_node" and "physical_node" files for them, but currently
the way it gets to know those handles is not exactly straightforward
(to put it lightly).
This is how it works, roughly:
1. acpi_bus_scan() finds the handle of a PCI root bridge,
creates a struct acpi_device object for it and passes that
object to acpi_pci_root_add().
2. acpi_pci_root_add() creates a struct acpi_pci_root object,
populates its "device" field with its argument's address
(device->handle is the ACPI handle found in step 1).
3. The struct acpi_pci_root object created in step 2 is passed
to pci_acpi_scan_root() and used to get resources that are
passed to pci_create_root_bus().
4. pci_create_root_bus() creates a struct pci_host_bridge object
and passes its "dev" member to device_register().
5. platform_notify(), which for systems with ACPI is set to
acpi_platform_notify(), is called.
So far, so good. Now it starts to be "interesting".
6. acpi_find_bridge_device() is used to find the ACPI handle of
the given device (which is the PCI root bridge) and executes
acpi_pci_find_root_bridge(), among other things, for the
given device object.
7. acpi_pci_find_root_bridge() uses the name (sic!) of the given
device object to extract the segment and bus numbers of the PCI
root bridge and passes them to acpi_get_pci_rootbridge_handle().
8. acpi_get_pci_rootbridge_handle() browses the list of ACPI PCI
root bridges and finds the one that matches the given segment
and bus numbers. Its handle is then used to initialize the
ACPI handle of the PCI root bridge's device object by
acpi_bind_one(). However, this is *exactly* the ACPI handle we
started with in step 1.
Needless to say, this is quite embarassing, but it may be avoided
thanks to commit f3fd0c8 (ACPI: Allow ACPI handles of devices to be
initialized in advance), which makes it possible to initialize the
ACPI handle of a device before passing it to device_register().
Accordingly, add a new __weak routine, pcibios_root_bridge_prepare(),
defaulting to an empty implementation that can be replaced by the
interested architecutres (x86 and ia64 at the moment) with functions
that will set the root bridge's ACPI handle before its dev member is
passed to device_register(). Make both x86 and ia64 provide such
implementations of pcibios_root_bridge_prepare() and remove
acpi_pci_find_root_bridge() and acpi_get_pci_rootbridge_handle() that
aren't necessary any more.
Included is a fix for breakage on systems with non-ACPI PCI host
bridges from Bjorn Helgaas.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
2013-01-10 01:33:37 +04:00
error = pcibios_root_bridge_prepare ( bridge ) ;
2013-06-06 21:10:08 +04:00
if ( error ) {
kfree ( bridge ) ;
goto err_out ;
}
ACPI / PCI: Set root bridge ACPI handle in advance
The ACPI handles of PCI root bridges need to be known to
acpi_bind_one(), so that it can create the appropriate
"firmware_node" and "physical_node" files for them, but currently
the way it gets to know those handles is not exactly straightforward
(to put it lightly).
This is how it works, roughly:
1. acpi_bus_scan() finds the handle of a PCI root bridge,
creates a struct acpi_device object for it and passes that
object to acpi_pci_root_add().
2. acpi_pci_root_add() creates a struct acpi_pci_root object,
populates its "device" field with its argument's address
(device->handle is the ACPI handle found in step 1).
3. The struct acpi_pci_root object created in step 2 is passed
to pci_acpi_scan_root() and used to get resources that are
passed to pci_create_root_bus().
4. pci_create_root_bus() creates a struct pci_host_bridge object
and passes its "dev" member to device_register().
5. platform_notify(), which for systems with ACPI is set to
acpi_platform_notify(), is called.
So far, so good. Now it starts to be "interesting".
6. acpi_find_bridge_device() is used to find the ACPI handle of
the given device (which is the PCI root bridge) and executes
acpi_pci_find_root_bridge(), among other things, for the
given device object.
7. acpi_pci_find_root_bridge() uses the name (sic!) of the given
device object to extract the segment and bus numbers of the PCI
root bridge and passes them to acpi_get_pci_rootbridge_handle().
8. acpi_get_pci_rootbridge_handle() browses the list of ACPI PCI
root bridges and finds the one that matches the given segment
and bus numbers. Its handle is then used to initialize the
ACPI handle of the PCI root bridge's device object by
acpi_bind_one(). However, this is *exactly* the ACPI handle we
started with in step 1.
Needless to say, this is quite embarassing, but it may be avoided
thanks to commit f3fd0c8 (ACPI: Allow ACPI handles of devices to be
initialized in advance), which makes it possible to initialize the
ACPI handle of a device before passing it to device_register().
Accordingly, add a new __weak routine, pcibios_root_bridge_prepare(),
defaulting to an empty implementation that can be replaced by the
interested architecutres (x86 and ia64 at the moment) with functions
that will set the root bridge's ACPI handle before its dev member is
passed to device_register(). Make both x86 and ia64 provide such
implementations of pcibios_root_bridge_prepare() and remove
acpi_pci_find_root_bridge() and acpi_get_pci_rootbridge_handle() that
aren't necessary any more.
Included is a fix for breakage on systems with non-ACPI PCI host
bridges from Bjorn Helgaas.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
2013-01-10 01:33:37 +04:00
2012-04-03 05:31:53 +04:00
error = device_register ( & bridge - > dev ) ;
2013-06-06 21:10:08 +04:00
if ( error ) {
put_device ( & bridge - > dev ) ;
goto err_out ;
}
2012-04-03 05:31:53 +04:00
b - > bridge = get_device ( & bridge - > dev ) ;
2010-02-08 21:16:33 +03:00
device_enable_async_suspend ( b - > bridge ) ;
2011-04-11 05:37:07 +04:00
pci_set_bus_of_node ( b ) ;
2005-04-17 02:20:36 +04:00
2008-02-19 14:20:41 +03:00
if ( ! parent )
set_dev_node ( b - > bridge , pcibus_to_node ( b ) ) ;
2007-05-23 06:47:54 +04:00
b - > dev . class = & pcibus_class ;
b - > dev . parent = b - > bridge ;
2008-10-30 04:17:49 +03:00
dev_set_name ( & b - > dev , " %04x:%02x " , pci_domain_nr ( b ) , bus ) ;
2007-05-23 06:47:54 +04:00
error = device_register ( & b - > dev ) ;
2005-04-17 02:20:36 +04:00
if ( error )
goto class_dev_reg_err ;
2013-04-12 09:44:20 +04:00
pcibios_add_bus ( b ) ;
2005-04-17 02:20:36 +04:00
/* Create legacy_io and legacy_mem files for this bus */
pci_create_legacy_files ( b ) ;
2011-10-29 02:25:40 +04:00
if ( parent )
dev_info ( parent , " PCI host bridge to bus %s \n " , dev_name ( & b - > dev ) ) ;
else
printk ( KERN_INFO " PCI host bridge to bus %s \n " , dev_name ( & b - > dev ) ) ;
2012-02-24 07:19:00 +04:00
/* Add initial resources to the bus */
2015-02-05 08:44:44 +03:00
resource_list_for_each_entry_safe ( window , n , resources ) {
list_move_tail ( & window - > node , & bridge - > windows ) ;
2012-02-24 07:19:00 +04:00
res = window - > res ;
offset = window - > offset ;
2012-05-18 05:51:12 +04:00
if ( res - > flags & IORESOURCE_BUS )
pci_bus_insert_busn_res ( b , bus , res - > end ) ;
else
pci_bus_add_resource ( b , res , 0 ) ;
2012-02-24 07:19:00 +04:00
if ( offset ) {
if ( resource_type ( res ) = = IORESOURCE_IO )
fmt = " (bus address [%#06llx-%#06llx]) " ;
else
fmt = " (bus address [%#010llx-%#010llx]) " ;
snprintf ( bus_addr , sizeof ( bus_addr ) , fmt ,
( unsigned long long ) ( res - > start - offset ) ,
( unsigned long long ) ( res - > end - offset ) ) ;
} else
bus_addr [ 0 ] = ' \0 ' ;
dev_info ( & b - > dev , " root bus resource %pR%s \n " , res , bus_addr ) ;
2011-10-29 02:25:40 +04:00
}
2012-02-24 07:18:59 +04:00
down_write ( & pci_bus_sem ) ;
list_add_tail ( & b - > node , & pci_root_buses ) ;
up_write ( & pci_bus_sem ) ;
2005-04-17 02:20:36 +04:00
return b ;
class_dev_reg_err :
2012-04-03 05:31:53 +04:00
put_device ( & bridge - > dev ) ;
device_unregister ( & bridge - > dev ) ;
2005-04-17 02:20:36 +04:00
err_out :
kfree ( b ) ;
return NULL ;
}
2015-04-08 21:21:33 +03:00
EXPORT_SYMBOL_GPL ( pci_create_root_bus ) ;
2005-09-06 03:31:03 +04:00
2012-05-18 21:35:50 +04:00
int pci_bus_insert_busn_res ( struct pci_bus * b , int bus , int bus_max )
{
struct resource * res = & b - > busn_res ;
struct resource * parent_res , * conflict ;
res - > start = bus ;
res - > end = bus_max ;
res - > flags = IORESOURCE_BUS ;
if ( ! pci_is_root_bus ( b ) )
parent_res = & b - > parent - > busn_res ;
else {
parent_res = get_pci_domain_busn_res ( pci_domain_nr ( b ) ) ;
res - > flags | = IORESOURCE_PCI_FIXED ;
}
2014-01-24 00:59:24 +04:00
conflict = request_resource_conflict ( parent_res , res ) ;
2012-05-18 21:35:50 +04:00
if ( conflict )
dev_printk ( KERN_DEBUG , & b - > dev ,
" busn_res: can not insert %pR under %s%pR (conflicts with %s %pR) \n " ,
res , pci_is_root_bus ( b ) ? " domain " : " " ,
parent_res , conflict - > name , conflict ) ;
return conflict = = NULL ;
}
int pci_bus_update_busn_res_end ( struct pci_bus * b , int bus_max )
{
struct resource * res = & b - > busn_res ;
struct resource old_res = * res ;
resource_size_t size ;
int ret ;
if ( res - > start > bus_max )
return - EINVAL ;
size = bus_max - res - > start + 1 ;
ret = adjust_resource ( res , res - > start , size ) ;
dev_printk ( KERN_DEBUG , & b - > dev ,
" busn_res: %pR end %s updated to %02x \n " ,
& old_res , ret ? " can not be " : " is " , bus_max ) ;
if ( ! ret & & ! res - > parent )
pci_bus_insert_busn_res ( b , res - > start , res - > end ) ;
return ret ;
}
void pci_bus_release_busn_res ( struct pci_bus * b )
{
struct resource * res = & b - > busn_res ;
int ret ;
if ( ! res - > flags | | ! res - > parent )
return ;
ret = release_resource ( res ) ;
dev_printk ( KERN_DEBUG , & b - > dev ,
" busn_res: %pR %s released \n " ,
res , ret ? " can not be " : " is " ) ;
}
2012-11-22 00:35:00 +04:00
struct pci_bus * pci_scan_root_bus ( struct device * parent , int bus ,
2011-10-29 02:25:50 +04:00
struct pci_ops * ops , void * sysdata , struct list_head * resources )
{
2015-02-05 08:44:44 +03:00
struct resource_entry * window ;
2012-05-18 05:51:12 +04:00
bool found = false ;
2011-10-29 02:25:50 +04:00
struct pci_bus * b ;
2012-05-18 05:51:12 +04:00
int max ;
2015-02-05 08:44:44 +03:00
resource_list_for_each_entry ( window , resources )
2012-05-18 05:51:12 +04:00
if ( window - > res - > flags & IORESOURCE_BUS ) {
found = true ;
break ;
}
2011-10-29 02:25:50 +04:00
b = pci_create_root_bus ( parent , bus , ops , sysdata , resources ) ;
if ( ! b )
return NULL ;
2012-05-18 05:51:12 +04:00
if ( ! found ) {
dev_info ( & b - > dev ,
" No busn resource found for root bus, will use [bus %02x-ff] \n " ,
bus ) ;
pci_bus_insert_busn_res ( b , bus , 255 ) ;
}
max = pci_scan_child_bus ( b ) ;
if ( ! found )
pci_bus_update_busn_res_end ( b , max ) ;
2011-10-29 02:25:50 +04:00
return b ;
}
EXPORT_SYMBOL ( pci_scan_root_bus ) ;
2012-11-22 00:35:00 +04:00
struct pci_bus * pci_scan_bus ( int bus , struct pci_ops * ops ,
2011-10-29 02:25:55 +04:00
void * sysdata )
{
LIST_HEAD ( resources ) ;
struct pci_bus * b ;
pci_add_resource ( & resources , & ioport_resource ) ;
pci_add_resource ( & resources , & iomem_resource ) ;
2012-05-18 05:51:12 +04:00
pci_add_resource ( & resources , & busn_resource ) ;
2011-10-29 02:25:55 +04:00
b = pci_create_root_bus ( NULL , bus , ops , sysdata , & resources ) ;
if ( b ) {
2012-05-18 05:51:12 +04:00
pci_scan_child_bus ( b ) ;
2011-10-29 02:25:55 +04:00
} else {
pci_free_resource_list ( & resources ) ;
}
return b ;
}
EXPORT_SYMBOL ( pci_scan_bus ) ;
2012-01-21 14:08:22 +04:00
/**
* pci_rescan_bus_bridge_resize - scan a PCI bus for devices .
* @ bridge : PCI bridge for the bus to scan
*
* Scan a PCI bus and child buses for new devices , add them ,
* and enable them , resizing bridge mmio / io resource if necessary
* and possible . The caller must ensure the child devices are already
* removed for resizing to occur .
*
* Returns the max number of subordinate bus discovered .
*/
2014-04-15 02:11:40 +04:00
unsigned int pci_rescan_bus_bridge_resize ( struct pci_dev * bridge )
2012-01-21 14:08:22 +04:00
{
unsigned int max ;
struct pci_bus * bus = bridge - > subordinate ;
max = pci_scan_child_bus ( bus ) ;
pci_assign_unassigned_bridge_resources ( bridge ) ;
pci_bus_add_devices ( bus ) ;
return max ;
}
2012-10-31 00:31:21 +04:00
/**
 * pci_rescan_bus - scan a PCI bus for devices.
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, adds them,
 * and enables them.
 *
 * Returns the max number of subordinate bus discovered.
 */
unsigned int pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int subordinate_max;

	subordinate_max = pci_scan_child_bus(bus);
	/* Hand out resources, then register the new devices. */
	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);

	return subordinate_max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);
2014-01-10 18:22:18 +04:00
/*
 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
 * routines should always be executed under this mutex.
 */
static DEFINE_MUTEX(pci_rescan_remove_lock);

/* Serialize rescan/remove against concurrent callers. */
void pci_lock_rescan_remove(void)
{
	mutex_lock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);

/* Release the rescan/remove serialization taken above. */
void pci_unlock_rescan_remove(void)
{
	mutex_unlock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
2014-04-19 04:13:49 +04:00
/*
 * Breadth-first ordering comparator for bus_sort_breadthfirst():
 * sorts devices lexicographically by (domain, bus number, devfn).
 *
 * Returns <0, 0 or >0 in the usual comparator convention.
 */
static int __init pci_sort_bf_cmp(const struct device *d_a,
				  const struct device *d_b)
{
	const struct pci_dev *a = to_pci_dev(d_a);
	const struct pci_dev *b = to_pci_dev(d_b);
	int dom_a = pci_domain_nr(a->bus);
	int dom_b = pci_domain_nr(b->bus);

	/* Primary key: PCI domain. */
	if (dom_a != dom_b)
		return dom_a < dom_b ? -1 : 1;

	/* Secondary key: bus number. */
	if (a->bus->number != b->bus->number)
		return a->bus->number < b->bus->number ? -1 : 1;

	/* Tertiary key: device/function. */
	if (a->devfn != b->devfn)
		return a->devfn < b->devfn ? -1 : 1;

	return 0;
}
2008-02-15 01:56:56 +03:00
/*
 * pci_sort_breadthfirst - re-sort the PCI device list breadth-first.
 *
 * Reorders all devices on pci_bus_type by (domain, bus, devfn) via
 * pci_sort_bf_cmp(), restoring the 2.4-era breadth-first discovery
 * order (e.g. so embedded NICs enumerate in chassis-label order).
 */
void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}