2010-05-24 08:44:54 +04:00
/*
2017-06-10 02:00:06 +03:00
* Copyright ( c ) 2010 - 2017 Intel Corporation . All rights reserved .
2010-05-24 08:44:54 +04:00
* Copyright ( c ) 2008 , 2009 QLogic Corporation . All rights reserved .
*
* This software is available to you under a choice of one of two
* licenses . You may choose to be licensed under the terms of the GNU
* General Public License ( GPL ) Version 2 , available from the file
* COPYING in the main directory of this source tree , or the
* OpenIB . org BSD license below :
*
* Redistribution and use in source and binary forms , with or
* without modification , are permitted provided that the following
* conditions are met :
*
* - Redistributions of source code must retain the above
* copyright notice , this list of conditions and the following
* disclaimer .
*
* - Redistributions in binary form must reproduce the above
* copyright notice , this list of conditions and the following
* disclaimer in the documentation and / or other materials
* provided with the distribution .
*
* THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND ,
* EXPRESS OR IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY , FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT . IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER LIABILITY , WHETHER IN AN
* ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE .
*/
# include <linux/pci.h>
# include <linux/io.h>
# include <linux/delay.h>
# include <linux/vmalloc.h>
2011-05-27 23:35:46 +04:00
# include <linux/module.h>
2010-05-24 08:44:54 +04:00
# include "qib.h"
/*
* This file contains PCIe utility routines that are common to the
* various QLogic InfiniPath adapters
*/
/*
* Code to adjust PCIe capabilities .
* To minimize the change footprint , we call it
* from qib_pcie_params , which every chip - specific
* file calls , even though this violates some
* expectations of harmlessness .
*/
2013-09-25 00:24:49 +04:00
static void qib_tune_pcie_caps ( struct qib_devdata * ) ;
static void qib_tune_pcie_coalesce ( struct qib_devdata * ) ;
2010-05-24 08:44:54 +04:00
/*
* Do all the common PCIe setup and initialization .
* devdata is not yet allocated , and is not allocated until after this
* routine returns success . Therefore qib_dev_err ( ) can ' t be used for error
* printing .
*/
int qib_pcie_init ( struct pci_dev * pdev , const struct pci_device_id * ent )
{
int ret ;
ret = pci_enable_device ( pdev ) ;
if ( ret ) {
/*
* This can happen ( in theory ) iff :
* We did a chip reset , and then failed to reprogram the
* BAR , or the chip reset due to an internal error . We then
* unloaded the driver and reloaded it .
*
* Both reset cases set the BAR back to initial state . For
* the latter case , the AER sticky error bit at offset 0x718
* should be set , but the Linux kernel doesn ' t yet know
* about that , it appears . If the original BAR was retained
* in the kernel data structures , this may be OK .
*/
qib_early_err ( & pdev - > dev , " pci enable failed: error %d \n " ,
- ret ) ;
goto done ;
}
ret = pci_request_regions ( pdev , QIB_DRV_NAME ) ;
if ( ret ) {
qib_devinfo ( pdev , " pci_request_regions fails: err %d \n " , - ret ) ;
goto bail ;
}
2020-11-21 12:51:27 +03:00
ret = dma_set_mask_and_coherent ( & pdev - > dev , DMA_BIT_MASK ( 64 ) ) ;
2010-05-24 08:44:54 +04:00
if ( ret ) {
/*
* If the 64 bit setup fails , try 32 bit . Some systems
* do not setup 64 bit maps on systems with 2 GB or less
* memory installed .
*/
2020-11-21 12:51:27 +03:00
ret = dma_set_mask_and_coherent ( & pdev - > dev , DMA_BIT_MASK ( 32 ) ) ;
2010-05-24 08:44:54 +04:00
if ( ret ) {
qib_devinfo ( pdev , " Unable to set DMA mask: %d \n " , ret ) ;
goto bail ;
}
2010-10-26 08:19:06 +04:00
}
2010-05-24 08:44:54 +04:00
pci_set_master ( pdev ) ;
goto done ;
bail :
pci_disable_device ( pdev ) ;
pci_release_regions ( pdev ) ;
done :
return ret ;
}
/*
* Do remaining PCIe setup , once dd is allocated , and save away
* fields required to re - initialize after a chip reset , or for
* various other purposes
*/
int qib_pcie_ddinit ( struct qib_devdata * dd , struct pci_dev * pdev ,
const struct pci_device_id * ent )
{
unsigned long len ;
resource_size_t addr ;
dd - > pcidev = pdev ;
pci_set_drvdata ( pdev , dd ) ;
addr = pci_resource_start ( pdev , 0 ) ;
len = pci_resource_len ( pdev , 0 ) ;
2020-01-06 11:43:50 +03:00
dd - > kregbase = ioremap ( addr , len ) ;
2010-05-24 08:44:54 +04:00
if ( ! dd - > kregbase )
return - ENOMEM ;
dd - > kregend = ( u64 __iomem * ) ( ( void __iomem * ) dd - > kregbase + len ) ;
dd - > physaddr = addr ; /* used for io_remap, etc. */
/*
* Save BARs to rewrite after device reset . Save all 64 bits of
* BAR , just in case .
*/
dd - > pcibar0 = addr ;
dd - > pcibar1 = addr > > 32 ;
dd - > deviceid = ent - > device ; /* save for later use */
dd - > vendorid = ent - > vendor ;
return 0 ;
}
/*
* Do PCIe cleanup , after chip - specific cleanup , etc . Just prior
* to releasing the dd memory .
* void because none of the core pcie cleanup returns are void
*/
void qib_pcie_ddcleanup ( struct qib_devdata * dd )
{
u64 __iomem * base = ( void __iomem * ) dd - > kregbase ;
dd - > kregbase = NULL ;
iounmap ( base ) ;
if ( dd - > piobase )
iounmap ( dd - > piobase ) ;
if ( dd - > userbase )
iounmap ( dd - > userbase ) ;
2010-06-18 03:13:44 +04:00
if ( dd - > piovl15base )
iounmap ( dd - > piovl15base ) ;
2010-05-24 08:44:54 +04:00
pci_disable_device ( dd - > pcidev ) ;
pci_release_regions ( dd - > pcidev ) ;
pci_set_drvdata ( dd - > pcidev , NULL ) ;
}
2021-01-21 12:45:00 +03:00
/*
2010-05-24 08:44:54 +04:00
* We save the msi lo and hi values , so we can restore them after
* chip reset ( the kernel PCI infrastructure doesn ' t yet handle that
* correctly .
*/
2017-09-26 17:00:24 +03:00
static void qib_cache_msi_info ( struct qib_devdata * dd , int pos )
2010-05-24 08:44:54 +04:00
{
struct pci_dev * pdev = dd - > pcidev ;
u16 control ;
2017-06-10 02:00:06 +03:00
pci_read_config_dword ( pdev , pos + PCI_MSI_ADDRESS_LO , & dd - > msi_lo ) ;
pci_read_config_dword ( pdev , pos + PCI_MSI_ADDRESS_HI , & dd - > msi_hi ) ;
2010-05-24 08:44:54 +04:00
pci_read_config_word ( pdev , pos + PCI_MSI_FLAGS , & control ) ;
2017-06-10 02:00:06 +03:00
2010-05-24 08:44:54 +04:00
/* now save the data (vector) info */
2017-06-10 02:00:06 +03:00
pci_read_config_word ( pdev ,
pos + ( ( control & PCI_MSI_FLAGS_64BIT ) ? 12 : 8 ) ,
2010-05-24 08:44:54 +04:00
& dd - > msi_data ) ;
}
2017-06-10 02:00:06 +03:00
/*
 * qib_pcie_params - allocate interrupt vectors and record PCIe link info
 * @dd: device data
 * @minw: minimum expected link width; warn if the trained width is less
 * @nent: in: maximum number of vectors wanted; out: number actually
 *        allocated (0 when MSI-X was not enabled, so the caller's fallback
 *        path runs); may be NULL
 *
 * Return: 0 on success, or the negative pci_alloc_irq_vectors() error.
 * dd->lbus_info is filled in even on error paths.
 */
int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent)
{
	u16 linkstat, speed;
	int nvec;
	int maxvec;
	unsigned int flags = PCI_IRQ_MSIX | PCI_IRQ_MSI;

	if (!pci_is_pcie(dd->pcidev)) {
		qib_dev_err(dd, "Can't find PCI Express capability!\n");
		/* set up something... */
		dd->lbus_width = 1;
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		nvec = -1;
		goto bail;
	}

	/* Only allow the legacy-INTx fallback if the chip supports it. */
	if (dd->flags & QIB_HAS_INTX)
		flags |= PCI_IRQ_LEGACY;

	maxvec = (nent && *nent) ? *nent : 1;
	nvec = pci_alloc_irq_vectors(dd->pcidev, 1, maxvec, flags);
	if (nvec < 0)
		goto bail;

	/*
	 * If nent exists, make sure to record how many vectors were allocated.
	 * If msix_enabled is false, return 0 so the fallback code works
	 * correctly.
	 */
	if (nent)
		*nent = !dd->pcidev->msix_enabled ? 0 : nvec;

	/* Cache MSI address/data so they can be restored after chip reset. */
	if (dd->pcidev->msi_enabled)
		qib_cache_msi_info(dd, dd->pcidev->msi_cap);

	pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
	/*
	 * speed is bits 0-3, linkwidth is bits 4-8
	 * no defines for them in headers
	 */
	speed = linkstat & 0xf;
	linkstat >>= 4;
	linkstat &= 0x1f;
	dd->lbus_width = linkstat;

	switch (speed) {
	case 1:
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		break;
	case 2:
		dd->lbus_speed = 5000; /* Gen2, 5GHz */
		break;
	default: /* not defined, assume gen1 */
		dd->lbus_speed = 2500;
		break;
	}

	/*
	 * Check against expected pcie width and complain if "wrong"
	 * on first initialization, not afterwards (i.e., reset).
	 */
	if (minw && linkstat < minw)
		qib_dev_err(dd,
			    "PCIe width %u (x%u HCA), performance reduced\n",
			    linkstat, minw);

	qib_tune_pcie_caps(dd);
	qib_tune_pcie_coalesce(dd);

bail:
	/* fill in string, even on errors */
	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
		 "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
	return nvec < 0 ? nvec : 0;
}
/**
 * qib_free_irq - Cleanup INTx and MSI interrupts
 * @dd: valid pointer to qib dev data
 *
 * Since cleanup for INTx and MSI interrupts is trivial, have a common
 * routine.
 */
void qib_free_irq(struct qib_devdata *dd)
{
	/* Release the vector-0 handler first, then the vectors themselves. */
	pci_free_irq(dd->pcidev, 0, dd);
	pci_free_irq_vectors(dd->pcidev);
}
/*
 * Setup pcie interrupt stuff again after a reset.  I'd like to just call
 * pci_enable_msi() again for msi, but when I do that,
 * the MSI enable bit doesn't get set in the command word, and
 * we switch to a different interrupt vector, which is confusing,
 * so I instead just do it all inline.  Perhaps somehow can tie this
 * into the PCIe hotplug support at some point
 *
 * Returns 1 when an interrupt source (restored MSI, or the INTx
 * fallback for chips that support it) is usable afterwards, 0 otherwise.
 */
int qib_reinit_intr(struct qib_devdata *dd)
{
	int pos;
	u16 control;
	int ret = 0;

	/* If we aren't using MSI, don't restore it */
	if (!dd->msi_lo)
		goto bail;

	pos = dd->pcidev->msi_cap;
	if (!pos) {
		qib_dev_err(dd,
			"Can't find MSI capability, can't restore MSI settings\n");
		ret = 0;
		/* nothing special for MSIx, just MSI */
		goto bail;
	}

	/* Rewrite the cached MSI address before touching the enable bit. */
	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
			       dd->msi_lo);
	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
			       dd->msi_hi);

	/* Re-set the enable bit only if the reset cleared it. */
	pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE)) {
		control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
				      control);
	}

	/* now rewrite the data (vector) info */
	pci_write_config_word(dd->pcidev, pos +
			      ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
			      dd->msi_data);
	ret = 1;

bail:
	/* Drop any handlers/vectors left over from before the reset. */
	qib_free_irq(dd);
	/* INTx needs no restore work; count it as a usable fallback. */
	if (!ret && (dd->flags & QIB_HAS_INTX))
		ret = 1;

	/* and now set the pci master bit again */
	pci_set_master(dd->pcidev);

	return ret;
}
/*
 * These two routines are helper routines for the device reset code
 * to move all the pcie code out of the chip-specific driver code.
 */
void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
{
	struct pci_dev *pdev = dd->pcidev;

	/* Snapshot config-space fields so qib_pcie_reenable() can restore. */
	pci_read_config_word(pdev, PCI_COMMAND, cmd);
	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, iline);
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, cline);
}
void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
{
	struct pci_dev *pdev = dd->pcidev;
	int r;

	/* Restore both halves of the 64-bit BAR saved at ddinit time. */
	r = pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, dd->pcibar0);
	if (r)
		qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);

	r = pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, dd->pcibar1);
	if (r)
		qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);

	/* now re-enable memory access, and restore cosmetic settings */
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, iline);
	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, cline);

	r = pci_enable_device(pdev);
	if (r)
		qib_dev_err(dd,
			"pci_enable_device failed after reset: %d\n", r);
}
/*
 * Opt-in knob for PCIe completion/data coalescing (consumed by
 * qib_tune_pcie_coalesce()); defaults to off, read-only via sysfs.
 */
static int qib_pcie_coalesce;
module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
2010-05-24 08:44:54 +04:00
/*
 * Enable PCIe completion and data coalescing, on Intel 5x00 and 7300
 * chipsets.  This is known to be unsafe for some revisions of some
 * of these chipsets, with some BIOS settings, and enabling it on those
 * systems may result in the system crashing, and/or data corruption.
 */
static void qib_tune_pcie_coalesce(struct qib_devdata *dd)
{
	struct pci_dev *parent;
	u16 devid;
	u32 mask, bits, val;

	/* Strictly opt-in via the pcie_coalesce module parameter. */
	if (!qib_pcie_coalesce)
		return;

	/* Find out supported and configured values for parent (root) */
	parent = dd->pcidev->bus->self;
	if (parent->bus->parent) {
		/* Only tune when attached directly below the root. */
		qib_devinfo(dd->pcidev, "Parent not root\n");
		return;
	}
	if (!pci_is_pcie(parent))
		return;
	/* The register poked below exists only on Intel root complexes. */
	if (parent->vendor != 0x8086)
		return;

	/*
	 * - bit 12: Max_rdcmp_Imt_EN: need to set to 1
	 * - bit 11: COALESCE_FORCE: need to set to 0
	 * - bit 10: COALESCE_EN: need to set to 1
	 * (but limitations on some on some chipsets)
	 *
	 * On the Intel 5000, 5100, and 7300 chipsets, there is
	 * also: - bit 25:24: COALESCE_MODE, need to set to 0
	 */
	devid = parent->device;
	if (devid >= 0x25e2 && devid <= 0x25fa) {
		/* 5000 P/V/X/Z */
		/* Early 5000 steppings only tolerate COALESCE_EN alone. */
		if (parent->revision <= 0xb2)
			bits = 1U << 10;
		else
			bits = 7U << 10;
		mask = (3U << 24) | (7U << 10);
	} else if (devid >= 0x65e2 && devid <= 0x65fa) {
		/* 5100 */
		bits = 1U << 10;
		mask = (3U << 24) | (7U << 10);
	} else if (devid >= 0x4021 && devid <= 0x402e) {
		/* 5400 */
		bits = 7U << 10;
		mask = 7U << 10;
	} else if (devid >= 0x3604 && devid <= 0x360a) {
		/* 7300 */
		bits = 7U << 10;
		mask = (3U << 24) | (7U << 10);
	} else {
		/* not one of the chipsets that we know about */
		return;
	}

	/*
	 * Read-modify-write the chipset control register at config offset
	 * 0x48 (presumably Intel-chipset-specific; not in public headers).
	 */
	pci_read_config_dword(parent, 0x48, &val);
	val &= ~mask;
	val |= bits;
	pci_write_config_dword(parent, 0x48, val);
}
/*
 * BIOS may not set PCIe bus-utilization parameters for best performance.
 * Check and optionally adjust them to maximize our throughput.
 *
 * Encoding used by qib_tune_pcie_caps(): bits 0..2 cap MaxPayload and
 * bits 4..6 cap MaxReadReq; each 3-bit field means a size of 128 << field.
 */
static int qib_pcie_caps;
module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
2010-05-24 08:44:54 +04:00
2013-09-25 00:24:49 +04:00
static void qib_tune_pcie_caps(struct qib_devdata *dd)
{
	struct pci_dev *parent;
	u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
	u16 rc_mrrs, ep_mrrs, max_mrrs;

	/* Find out supported and configured values for parent (root) */
	parent = dd->pcidev->bus->self;
	if (!pci_is_root_bus(parent->bus)) {
		/* Only tune when directly below the root complex. */
		qib_devinfo(dd->pcidev, "Parent not root\n");
		return;
	}
	if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
		return;

	/*
	 * MPS values here are encoded as log2(bytes) - 7 (0 = 128 bytes);
	 * pcie_get_mps() returns bytes, hence the ffs() - 8 conversion.
	 */
	rc_mpss = parent->pcie_mpss;
	rc_mps = ffs(pcie_get_mps(parent)) - 8;
	/* Find out supported and configured values for endpoint (us) */
	ep_mpss = dd->pcidev->pcie_mpss;
	ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8;

	/* Find max payload supported by root, endpoint */
	/* (from here on, rc_mpss doubles as the working minimum) */
	if (rc_mpss > ep_mpss)
		rc_mpss = ep_mpss;

	/* If Supported greater than limit in module param, limit it */
	if (rc_mpss > (qib_pcie_caps & 7))
		rc_mpss = qib_pcie_caps & 7;

	/* If less than (allowed, supported), bump root payload */
	if (rc_mpss > rc_mps) {
		rc_mps = rc_mpss;
		pcie_set_mps(parent, 128 << rc_mps);
	}
	/* If less than (allowed, supported), bump endpoint payload */
	if (rc_mpss > ep_mps) {
		ep_mps = rc_mpss;
		pcie_set_mps(dd->pcidev, 128 << ep_mps);
	}

	/*
	 * Now the Read Request size.
	 * No field for max supported, but PCIe spec limits it to 4096,
	 * which is code '5' (log2(4096) - 7)
	 */
	max_mrrs = 5;
	if (max_mrrs > ((qib_pcie_caps >> 4) & 7))
		max_mrrs = (qib_pcie_caps >> 4) & 7;

	/* Convert the code back to bytes for the readrq helpers. */
	max_mrrs = 128 << max_mrrs;
	rc_mrrs = pcie_get_readrq(parent);
	ep_mrrs = pcie_get_readrq(dd->pcidev);

	/* Raise root and endpoint read-request sizes up to the allowed max. */
	if (max_mrrs > rc_mrrs) {
		rc_mrrs = max_mrrs;
		pcie_set_readrq(parent, rc_mrrs);
	}
	if (max_mrrs > ep_mrrs) {
		ep_mrrs = max_mrrs;
		pcie_set_readrq(dd->pcidev, ep_mrrs);
	}
}
/* End of PCIe capability tuning */
/*
* From here through qib_pci_err_handler definition is invoked via
* PCI error infrastructure , registered via pci
*/
static pci_ers_result_t
qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);
	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;

	switch (state) {
	case pci_channel_io_normal:
		/* Nothing actually broke; keep running as-is. */
		qib_devinfo(pdev, "State Normal, ignoring\n");
		break;

	case pci_channel_io_frozen:
		/* I/O is blocked; shut the device off and request a reset. */
		qib_devinfo(pdev, "State Frozen, requesting reset\n");
		pci_disable_device(pdev);
		result = PCI_ERS_RESULT_NEED_RESET;
		break;

	case pci_channel_io_perm_failure:
		qib_devinfo(pdev, "State Permanent Failure, disabling\n");
		if (dd) {
			/* no more register accesses! */
			dd->flags &= ~QIB_PRESENT;
			qib_disable_after_error(dd);
		}
		/* else early, or other problem */
		result = PCI_ERS_RESULT_DISCONNECT;
		break;

	default: /* shouldn't happen */
		qib_devinfo(pdev, "QIB PCI errors detected (state %d)\n",
			    state);
		break;
	}

	return result;
}
static pci_ers_result_t
qib_pci_mmio_enabled ( struct pci_dev * pdev )
{
u64 words = 0U ;
struct qib_devdata * dd = pci_get_drvdata ( pdev ) ;
pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED ;
if ( dd & & dd - > pport ) {
words = dd - > f_portcntr ( dd - > pport , QIBPORTCNTR_WORDRCV ) ;
if ( words = = ~ 0ULL )
ret = PCI_ERS_RESULT_NEED_RESET ;
}
2012-07-19 17:04:25 +04:00
qib_devinfo ( pdev ,
" QIB mmio_enabled function called, read wordscntr %Lx, returning %d \n " ,
words , ret ) ;
2010-05-24 08:44:54 +04:00
return ret ;
}
static pci_ers_result_t
qib_pci_slot_reset(struct pci_dev *pdev)
{
	/* No slot-reset recovery is implemented; just log the callback. */
	qib_devinfo(pdev, "QIB slot_reset function called, ignored\n");
	return PCI_ERS_RESULT_CAN_RECOVER;
}
static void
qib_pci_resume(struct pci_dev *pdev)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);

	qib_devinfo(pdev, "QIB resume function called\n");

	/*
	 * Running jobs will fail, since it's asynchronous
	 * unlike sysfs-requested reset.  Better than
	 * doing nothing.
	 */
	qib_init(dd, 1); /* same as re-init after reset */
}
2012-09-07 20:33:17 +04:00
/* Error-recovery callbacks this driver registers with the PCI core. */
const struct pci_error_handlers qib_pci_err_handler = {
	.error_detected = qib_pci_error_detected,
	.mmio_enabled = qib_pci_mmio_enabled,
	.slot_reset = qib_pci_slot_reset,
	.resume = qib_pci_resume,
};