2012-11-30 06:14:21 +04:00
/*
* This is the Fusion MPT base driver providing common API layer interface
* for access to MPT ( Message Passing Technology ) firmware .
*
* This code is based on drivers / scsi / mpt3sas / mpt3sas_base . h
2014-09-12 14:05:29 +04:00
* Copyright ( C ) 2012 - 2014 LSI Corporation
2015-01-12 09:09:02 +03:00
* Copyright ( C ) 2013 - 2014 Avago Technologies
* ( mailto : MPT - FusionLinux . pdl @ avagotech . com )
2012-11-30 06:14:21 +04:00
*
* This program is free software ; you can redistribute it and / or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation ; either version 2
* of the License , or ( at your option ) any later version .
*
* This program is distributed in the hope that it will be useful ,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
* GNU General Public License for more details .
*
* NO WARRANTY
* THE PROGRAM IS PROVIDED ON AN " AS IS " BASIS , WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND , EITHER EXPRESS OR IMPLIED INCLUDING , WITHOUT
* LIMITATION , ANY WARRANTIES OR CONDITIONS OF TITLE , NON - INFRINGEMENT ,
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE . Each Recipient is
* solely responsible for determining the appropriateness of using and
* distributing the Program and assumes all risks associated with its
* exercise of rights under this Agreement , including but not limited to
* the risks and costs of program errors , damage to or loss of data ,
* programs or equipment , and unavailability or interruption of operations .
* DISCLAIMER OF LIABILITY
* NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
* DIRECT , INDIRECT , INCIDENTAL , SPECIAL , EXEMPLARY , OR CONSEQUENTIAL
* DAMAGES ( INCLUDING WITHOUT LIMITATION LOST PROFITS ) , HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR
* TORT ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE
* USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
* HEREUNDER , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
* You should have received a copy of the GNU General Public License
* along with this program ; if not , write to the Free Software
* Foundation , Inc . , 51 Franklin Street , Fifth Floor , Boston , MA 02110 - 1301 ,
* USA .
*/
# ifndef MPT3SAS_BASE_H_INCLUDED
# define MPT3SAS_BASE_H_INCLUDED
# include "mpi/mpi2_type.h"
# include "mpi/mpi2.h"
# include "mpi/mpi2_ioc.h"
# include "mpi/mpi2_cnfg.h"
# include "mpi/mpi2_init.h"
# include "mpi/mpi2_raid.h"
# include "mpi/mpi2_tool.h"
# include "mpi/mpi2_sas.h"
2017-10-31 15:32:28 +03:00
# include "mpi/mpi2_pci.h"
2018-10-25 17:03:40 +03:00
# include "mpi/mpi2_image.h"
2012-11-30 06:14:21 +04:00
# include <scsi/scsi.h>
# include <scsi/scsi_cmnd.h>
# include <scsi/scsi_device.h>
# include <scsi/scsi_host.h>
# include <scsi/scsi_tcq.h>
# include <scsi/scsi_transport_sas.h>
# include <scsi/scsi_dbg.h>
# include <scsi/scsi_eh.h>
2015-11-11 15:00:18 +03:00
# include <linux/pci.h>
# include <linux/poll.h>
scsi: mpt3sas: Irq poll to avoid CPU hard lockups
Issue Description:
We have seen cpu lock up issue from fields if system has greater (more than
96) logical cpu count. SAS3.0 controller (Invader series) supports at max
96 msix vector and SAS3.5 product (Ventura) supports at max 128 msix
vectors.
This may be a generic issue (if PCI device supports completion on multiple
reply queues). Let me explain it w.r.t to mpt3sas supported h/w just to
simplify the problem and possible changes to handle such issues. IT HBA
(mpt3sas) supports multiple reply queues in completion path. Driver creates
MSI-x vectors for controller as "min of (FW supported Reply queue, Logical
CPUs)". If submitter is not interrupted via completion on same CPU, there
is a loop in the IO path. This behavior can cause hard/soft CPU lockups, IO
timeout, system sluggish etc.
Example - one CPU (e.g. CPU A) is busy submitting the IOs and another CPU
(e.g. CPU B) is busy with processing the corresponding IO's reply
descriptors from reply descriptor queue upon receiving the interrupts from
HBA. If the CPU A is continuously pumping the IOs then always CPU B (which
is executing the ISR) will see the valid reply descriptors in the reply
descriptor queue and it will be continuously processing those reply
descriptor in a loop without quitting the ISR handler.
Mpt3sas driver will exit ISR handler if it finds unused reply descriptor in
the reply descriptor queue. Since CPU A will be continuously sending the
IOs, CPU B may always see a valid reply descriptor (posted by HBA Firmware
after processing the IO) in the reply descriptor queue. In worst case,
driver will not quit from this loop in the ISR handler. Eventually, CPU
lockup will be detected by watchdog.
Above mentioned behavior is not common if "rq_affinity" set to 2 or
affinity_hint is honored by irqbalance as "exact". If rq_affinity is set
to 2, submitter will be always interrupted via completion on same CPU. If
irqbalance is using "exact" policy, interrupt will be delivered to
submitter CPU.
If CPU counts to MSI-X vectors (reply descriptor Queues) count ratio is not
1:1, we still have exposure of issue explained above and for that we don't
have any solution.
Exposure of soft/hard lockup if CPU count is more than MSI-x supported by
device.
If CPUs count to MSI-x vectors count ratio is not 1:1, (Other way, if CPU
counts to MSI-x vector count ratio is something like X:1, where X > 1) then
'exact' irqbalance policy OR rq_affinity = 2 won't help to avoid CPU
hard/soft lockups. There won't be any one to one mapping between CPU to
MSI-x vector instead one MSI-x interrupt (or reply descriptor queue) is
shared with group/set of CPUs and there is a possibility of having a loop
in the IO path within that CPU group and may observe lockups.
For example: Consider a system having two NUMA nodes and each node having
four logical CPUs and also consider that number of MSI-x vectors enabled on
the HBA is two, then CPUs count to MSI-x vector count ratio as 4:1. e.g.
MSIx vector 0 is affinity to CPU 0, CPU 1, CPU 2 & CPU 3 of NUMA node 0 and
MSI-x vector 1 is affinity to CPU 4, CPU 5, CPU 6 & CPU 7 of NUMA node 1.
numactl --hardware
available: 2 nodes (0-1)
node 0 cpus: 0 1 2 3 --> MSI-x 0
node 0 size: 65536 MB
node 0 free: 63176 MB
node 1 cpus: 4 5 6 7 -->MSI-x 1
node 1 size: 65536 MB
node 1 free: 63176 MB
Assume that user started an application which uses all the CPUs of NUMA
node 0 for issuing the IOs. Only one CPU from affinity list (it can be any
cpu since this behavior depends upon irqbalance) CPU0 will receive the
interrupts from MSIx vector 0 for all the IOs. Eventually, CPU 0 IO
submission percentage will be decreasing and ISR processing percentage will
be increasing as it is more busy with processing the interrupts. Gradually
IO submission percentage on CPU 0 will be zero and it's ISR processing
percentage will be 100 percentage as IO loop has already formed within the
NUMA node 0, i.e. CPU 1, CPU 2 & CPU 3 will be continuously busy with
submitting the heavy IOs and only CPU 0 is busy in the ISR path as it
always find the valid reply descriptor in the reply descriptor
queue. Eventually, we will observe the hard lockup here.
Chances of occurring of hard/soft lockups are directly proportional to
value of X. If value of X is high, then chances of observing CPU lockups is
high.
Solution: Use IRQ poll interface defined in " irq_poll.c". mpt3sas driver
will execute ISR routine in Softirq context and it will always quit the
loop based on budget provided in IRQ poll interface.
In these scenarios (i.e. where CPUs count to MSI-X vectors count ratio is
X:1 (where X > 1)), IRQ poll interface will avoid CPU hard lockups due to
voluntary exit from the reply queue processing based on budget. Note -
Only one MSI-x vector is busy doing processing.
Irqstat output:
IRQs / 1 second(s)
IRQ# TOTAL NODE0 NODE1 NODE2 NODE3 NAME
44 122871 122871 0 0 0 IR-PCI-MSI-edge mpt3sas0-msix0
45 0 0 0 0 0 IR-PCI-MSI-edge mpt3sas0-msix1
We use this approach only if cpu count is more than FW supported MSI-x
vector
Signed-off-by: Suganath Prabu <suganath-prabu.subramani@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-02-15 10:40:27 +03:00
# include <linux/irq_poll.h>
2012-11-30 06:14:21 +04:00
# include "mpt3sas_debug.h"
# include "mpt3sas_trigger_diag.h"
2020-11-26 12:43:05 +03:00
# include "mpt3sas_trigger_pages.h"
2012-11-30 06:14:21 +04:00
/* driver versioning info */
# define MPT3SAS_DRIVER_NAME "mpt3sas"
2015-01-12 09:09:02 +03:00
# define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
2012-11-30 06:14:21 +04:00
# define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
2021-03-05 13:29:04 +03:00
# define MPT3SAS_DRIVER_VERSION "37.101.00.00"
2021-02-04 06:37:24 +03:00
# define MPT3SAS_MAJOR_VERSION 37
2021-03-05 13:29:04 +03:00
# define MPT3SAS_MINOR_VERSION 101
2013-06-29 02:25:45 +04:00
# define MPT3SAS_BUILD_VERSION 0
2012-11-30 06:14:21 +04:00
# define MPT3SAS_RELEASE_VERSION 00
2015-11-11 15:00:34 +03:00
# define MPT2SAS_DRIVER_NAME "mpt2sas"
# define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
# define MPT2SAS_DRIVER_VERSION "20.102.00.00"
# define MPT2SAS_MAJOR_VERSION 20
# define MPT2SAS_MINOR_VERSION 102
# define MPT2SAS_BUILD_VERSION 0
# define MPT2SAS_RELEASE_VERSION 00
2019-12-26 14:13:27 +03:00
/* CoreDump: Default timeout */
# define MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS (15) /*15 seconds*/
2019-12-26 14:13:28 +03:00
# define MPT3SAS_COREDUMP_LOOP_DONE (0xFF)
2020-11-26 12:43:04 +03:00
# define MPT3SAS_TIMESYNC_TIMEOUT_SECONDS (10) /* 10 seconds */
# define MPT3SAS_TIMESYNC_UPDATE_INTERVAL (900) /* 15 minutes */
# define MPT3SAS_TIMESYNC_UNIT_MASK (0x80) /* bit 7 */
# define MPT3SAS_TIMESYNC_MASK (0x7F) /* 0 - 6 bits */
# define SECONDS_PER_MIN (60)
# define SECONDS_PER_HOUR (3600)
# define MPT3SAS_COREDUMP_LOOP_DONE (0xFF)
# define MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP (0x81)
2019-12-26 14:13:27 +03:00
2012-11-30 06:14:21 +04:00
/*
* Set MPT3SAS_SG_DEPTH value based on user input .
*/
2016-04-05 00:48:10 +03:00
# define MPT_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE
2015-11-11 15:00:18 +03:00
# define MPT_MIN_PHYS_SEGMENTS 16
2017-10-10 16:11:16 +03:00
# define MPT_KDUMP_MIN_PHYS_SEGMENTS 32
2015-11-11 15:00:18 +03:00
2018-02-07 13:51:48 +03:00
# define MCPU_MAX_CHAINS_PER_IO 3
2012-11-30 06:14:21 +04:00
# ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE
# define MPT3SAS_SG_DEPTH CONFIG_SCSI_MPT3SAS_MAX_SGE
# else
2015-11-11 15:00:18 +03:00
# define MPT3SAS_SG_DEPTH MPT_MAX_PHYS_SEGMENTS
2012-11-30 06:14:21 +04:00
# endif
2015-11-11 15:00:18 +03:00
# ifdef CONFIG_SCSI_MPT2SAS_MAX_SGE
# define MPT2SAS_SG_DEPTH CONFIG_SCSI_MPT2SAS_MAX_SGE
# else
# define MPT2SAS_SG_DEPTH MPT_MAX_PHYS_SEGMENTS
# endif
2012-11-30 06:14:21 +04:00
/*
* Generic Defines
*/
# define MPT3SAS_SATA_QUEUE_DEPTH 32
# define MPT3SAS_SAS_QUEUE_DEPTH 254
# define MPT3SAS_RAID_QUEUE_DEPTH 128
2017-10-10 16:11:16 +03:00
# define MPT3SAS_KDUMP_SCSI_IO_DEPTH 200
2012-11-30 06:14:21 +04:00
2016-05-06 11:59:30 +03:00
# define MPT3SAS_RAID_MAX_SECTORS 8192
2017-10-31 15:32:28 +03:00
# define MPT3SAS_HOST_PAGE_SIZE_4K 12
2017-10-31 15:32:33 +03:00
# define MPT3SAS_NVME_QUEUE_DEPTH 128
2012-11-30 06:14:21 +04:00
# define MPT_NAME_LENGTH 32 /* generic length of strings */
# define MPT_STRING_LENGTH 64
2018-02-07 13:51:47 +03:00
# define MPI_FRAME_START_OFFSET 256
# define REPLY_FREE_POOL_SIZE 512 /*(32 maxcredix *4)*(4 times)*/
2012-11-30 06:14:21 +04:00
# define MPT_MAX_CALLBACKS 32
# define INTERNAL_CMDS_COUNT 10 /* reserved cmds */
2016-01-28 09:37:02 +03:00
/* reserved for issuing internally framed scsi io cmds */
# define INTERNAL_SCSIIO_CMDS_COUNT 3
2012-11-30 06:14:21 +04:00
# define MPI3_HIM_MASK 0xFFFFFFFF /* mask every bit*/
# define MPT3SAS_INVALID_DEVICE_HANDLE 0xFFFF
2016-01-28 09:37:04 +03:00
# define MAX_CHAIN_ELEMT_SZ 16
# define DEFAULT_NUM_FWCHAIN_ELEMTS 8
2019-12-26 14:13:25 +03:00
# define IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT 6
2018-04-24 12:28:39 +03:00
# define FW_IMG_HDR_READ_TIMEOUT 15
2018-10-31 16:23:32 +03:00
# define IOC_OPERATIONAL_WAIT_COUNT 10
2017-10-31 15:32:28 +03:00
/*
* NVMe defines
*/
# define NVME_PRP_SIZE 8 /* PRP size */
# define NVME_ERROR_RESPONSE_SIZE 16 /* Max NVME Error Response */
2018-04-24 12:28:41 +03:00
# define NVME_TASK_ABORT_MIN_TIMEOUT 6
# define NVME_TASK_ABORT_MAX_TIMEOUT 60
# define NVME_TASK_MNGT_CUSTOM_MASK (0x0010)
2017-10-31 15:32:28 +03:00
# define NVME_PRP_PAGE_SIZE 4096 /* Page size */
2018-06-16 00:41:57 +03:00
/*
 * struct mpt3sas_nvme_cmd - leading portion of an NVMe command frame.
 * Only the PRP entries are of interest to the driver; the first 24
 * bytes (opcode, flags, CID, NSID, ...) are not touched here.
 */
struct mpt3sas_nvme_cmd {
	u8	rsvd[24];	/* command fields the driver does not modify */
	__le64	prp1;		/* Physical Region Page entry 1 */
	__le64	prp2;		/* PRP entry 2 or pointer to a PRP list */
};
2018-04-24 12:28:41 +03:00
2012-11-30 06:14:21 +04:00
/*
* logging format
*/
2018-09-17 18:01:08 +03:00
# define ioc_err(ioc, fmt, ...) \
pr_err ( " %s: " fmt , ( ioc ) - > name , # # __VA_ARGS__ )
# define ioc_notice(ioc, fmt, ...) \
pr_notice ( " %s: " fmt , ( ioc ) - > name , # # __VA_ARGS__ )
# define ioc_warn(ioc, fmt, ...) \
pr_warn ( " %s: " fmt , ( ioc ) - > name , # # __VA_ARGS__ )
# define ioc_info(ioc, fmt, ...) \
pr_info ( " %s: " fmt , ( ioc ) - > name , # # __VA_ARGS__ )
2015-11-11 15:00:28 +03:00
/*
* WarpDrive Specific Log codes
*/
# define MPT2_WARPDRIVE_LOGENTRY (0x8002)
# define MPT2_WARPDRIVE_LC_SSDT (0x41)
# define MPT2_WARPDRIVE_LC_SSDLW (0x43)
# define MPT2_WARPDRIVE_LC_SSDLF (0x44)
# define MPT2_WARPDRIVE_LC_BRMF (0x4D)
2012-11-30 06:14:21 +04:00
/*
* per target private data
*/
# define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01
# define MPT_TARGET_FLAGS_VOLUME 0x02
# define MPT_TARGET_FLAGS_DELETED 0x04
# define MPT_TARGET_FASTPATH_IO 0x08
2017-10-31 15:32:27 +03:00
# define MPT_TARGET_FLAGS_PCIE_DEVICE 0x10
2012-11-30 06:14:21 +04:00
2015-11-11 15:00:26 +03:00
# define SAS2_PCI_DEVICE_B0_REVISION (0x01)
# define SAS3_PCI_DEVICE_C0_REVISION (0x02)
2019-01-29 15:14:42 +03:00
/* Atlas PCIe Switch Management Port */
# define MPI26_ATLAS_PCIe_SWITCH_DEVID (0x00B2)
2014-09-12 14:05:30 +04:00
/*
* Intel HBA branding
*/
2015-11-11 15:00:32 +03:00
# define MPT2SAS_INTEL_RMS25JB080_BRANDING \
" Intel(R) Integrated RAID Module RMS25JB080 "
# define MPT2SAS_INTEL_RMS25JB040_BRANDING \
" Intel(R) Integrated RAID Module RMS25JB040 "
# define MPT2SAS_INTEL_RMS25KB080_BRANDING \
" Intel(R) Integrated RAID Module RMS25KB080 "
# define MPT2SAS_INTEL_RMS25KB040_BRANDING \
" Intel(R) Integrated RAID Module RMS25KB040 "
# define MPT2SAS_INTEL_RMS25LB040_BRANDING \
" Intel(R) Integrated RAID Module RMS25LB040 "
# define MPT2SAS_INTEL_RMS25LB080_BRANDING \
" Intel(R) Integrated RAID Module RMS25LB080 "
# define MPT2SAS_INTEL_RMS2LL080_BRANDING \
" Intel Integrated RAID Module RMS2LL080 "
# define MPT2SAS_INTEL_RMS2LL040_BRANDING \
" Intel Integrated RAID Module RMS2LL040 "
# define MPT2SAS_INTEL_RS25GB008_BRANDING \
" Intel(R) RAID Controller RS25GB008 "
# define MPT2SAS_INTEL_SSD910_BRANDING \
" Intel(R) SSD 910 Series "
2014-09-12 14:05:30 +04:00
# define MPT3SAS_INTEL_RMS3JC080_BRANDING \
" Intel(R) Integrated RAID Module RMS3JC080 "
# define MPT3SAS_INTEL_RS3GC008_BRANDING \
" Intel(R) RAID Controller RS3GC008 "
# define MPT3SAS_INTEL_RS3FC044_BRANDING \
" Intel(R) RAID Controller RS3FC044 "
# define MPT3SAS_INTEL_RS3UC080_BRANDING \
" Intel(R) RAID Controller RS3UC080 "
2012-11-30 06:14:21 +04:00
2014-09-12 14:05:30 +04:00
/*
* Intel HBA SSDIDs
*/
2015-11-11 15:00:32 +03:00
# define MPT2SAS_INTEL_RMS25JB080_SSDID 0x3516
# define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517
# define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518
# define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519
# define MPT2SAS_INTEL_RMS25LB040_SSDID 0x351A
# define MPT2SAS_INTEL_RMS25LB080_SSDID 0x351B
# define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
# define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
# define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000
# define MPT2SAS_INTEL_SSD910_SSDID 0x3700
# define MPT3SAS_INTEL_RMS3JC080_SSDID 0x3521
# define MPT3SAS_INTEL_RS3GC008_SSDID 0x3522
# define MPT3SAS_INTEL_RS3FC044_SSDID 0x3523
# define MPT3SAS_INTEL_RS3UC080_SSDID 0x3524
2012-11-30 06:14:21 +04:00
2015-06-30 09:54:56 +03:00
/*
* Dell HBA branding
*/
2015-11-11 15:00:32 +03:00
# define MPT2SAS_DELL_BRANDING_SIZE 32
# define MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING "Dell 6Gbps SAS HBA"
# define MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING "Dell PERC H200 Adapter"
# define MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING "Dell PERC H200 Integrated"
# define MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING "Dell PERC H200 Modular"
# define MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING "Dell PERC H200 Embedded"
# define MPT2SAS_DELL_PERC_H200_BRANDING "Dell PERC H200"
# define MPT2SAS_DELL_6GBPS_SAS_BRANDING "Dell 6Gbps SAS"
2015-06-30 09:54:56 +03:00
# define MPT3SAS_DELL_12G_HBA_BRANDING \
" Dell 12Gbps HBA "
/*
* Dell HBA SSDIDs
*/
2015-11-11 15:00:32 +03:00
# define MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID 0x1F1C
# define MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID 0x1F1D
# define MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID 0x1F1E
# define MPT2SAS_DELL_PERC_H200_MODULAR_SSDID 0x1F1F
# define MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID 0x1F20
# define MPT2SAS_DELL_PERC_H200_SSDID 0x1F21
# define MPT2SAS_DELL_6GBPS_SAS_SSDID 0x1F22
# define MPT3SAS_DELL_12G_HBA_SSDID 0x1F46
2015-06-30 09:54:56 +03:00
2015-06-30 09:54:57 +03:00
/*
* Cisco HBA branding
*/
2015-06-30 09:55:02 +03:00
# define MPT3SAS_CISCO_12G_8E_HBA_BRANDING \
2015-11-11 15:00:32 +03:00
" Cisco 9300-8E 12G SAS HBA "
2015-06-30 09:55:02 +03:00
# define MPT3SAS_CISCO_12G_8I_HBA_BRANDING \
2015-11-11 15:00:32 +03:00
" Cisco 9300-8i 12G SAS HBA "
2015-06-30 09:55:02 +03:00
# define MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING \
2015-11-11 15:00:32 +03:00
" Cisco 12G Modular SAS Pass through Controller "
2015-06-30 09:55:02 +03:00
# define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING \
2015-11-11 15:00:32 +03:00
" UCS C3X60 12G SAS Pass through Controller "
2015-06-30 09:54:57 +03:00
/*
* Cisco HBA SSSDIDs
*/
2015-06-30 09:55:02 +03:00
# define MPT3SAS_CISCO_12G_8E_HBA_SSDID 0x14C
# define MPT3SAS_CISCO_12G_8I_HBA_SSDID 0x154
# define MPT3SAS_CISCO_12G_AVILA_HBA_SSDID 0x155
# define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID 0x156
2015-06-30 09:54:57 +03:00
2012-11-30 06:14:21 +04:00
/*
* status bits for ioc - > diag_buffer_status
*/
# define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01)
# define MPT3_DIAG_BUFFER_IS_RELEASED (0x02)
# define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04)
2019-09-13 16:04:44 +03:00
# define MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED (0x08)
2019-09-13 16:04:45 +03:00
# define MPT3_DIAG_BUFFER_IS_APP_OWNED (0x10)
2012-11-30 06:14:21 +04:00
2015-11-11 15:00:32 +03:00
/*
* HP HBA branding
*/
# define MPT2SAS_HP_3PAR_SSVID 0x1590
# define MPT2SAS_HP_2_4_INTERNAL_BRANDING \
" HP H220 Host Bus Adapter "
# define MPT2SAS_HP_2_4_EXTERNAL_BRANDING \
" HP H221 Host Bus Adapter "
# define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING \
" HP H222 Host Bus Adapter "
# define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING \
" HP H220i Host Bus Adapter "
# define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING \
" HP H210i Host Bus Adapter "
/*
* HP HBA SSDIDs
*/
# define MPT2SAS_HP_2_4_INTERNAL_SSDID 0x0041
# define MPT2SAS_HP_2_4_EXTERNAL_SSDID 0x0042
# define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID 0x0043
# define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID 0x0044
# define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046
2015-06-30 09:54:47 +03:00
/*
* Combined Reply Queue constants ,
* There are twelve Supplemental Reply Post Host Index Registers
* and each register is at offset 0x10 bytes from the previous one .
*/
2018-05-31 13:34:51 +03:00
# define MAX_COMBINED_MSIX_VECTORS(gen35) ((gen35 == 1) ? 16 : 8)
2016-10-26 11:04:38 +03:00
# define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12
# define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16
# define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
2012-11-30 06:14:21 +04:00
/* OEM Identifiers */
# define MFG10_OEM_ID_INVALID (0x00000000)
# define MFG10_OEM_ID_DELL (0x00000001)
# define MFG10_OEM_ID_FSC (0x00000002)
# define MFG10_OEM_ID_SUN (0x00000003)
# define MFG10_OEM_ID_IBM (0x00000004)
/* GENERIC Flags 0*/
# define MFG10_GF0_OCE_DISABLED (0x00000001)
# define MFG10_GF0_R1E_DRIVE_COUNT (0x00000002)
# define MFG10_GF0_R10_DISPLAY (0x00000004)
# define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008)
# define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010)
2015-06-30 09:55:00 +03:00
# define VIRTUAL_IO_FAILED_RETRY (0x32010081)
2019-05-31 15:14:36 +03:00
/* High IOPs definitions */
2019-05-31 15:14:38 +03:00
# define MPT3SAS_DEVICE_HIGH_IOPS_DEPTH 8
2019-05-31 15:14:36 +03:00
# define MPT3SAS_HIGH_IOPS_REPLY_QUEUES 8
2019-05-31 15:14:38 +03:00
# define MPT3SAS_HIGH_IOPS_BATCH_COUNT 16
2019-05-31 15:14:36 +03:00
# define MPT3SAS_GEN35_MAX_MSIX_QUEUES 128
scsi: mpt3sas: Handle RDPQ DMA allocation in same 4G region
For INVADER_SERIES, each set of 8 reply queues (0 - 7, 8 - 15,..), and for
VENTURA_SERIES, each set of 16 reply queues (0 - 15, 16 - 31,..) need to be
within the same 4 GB boundary. Driver uses limitation of VENTURA_SERIES to
manage INVADER_SERIES as well. The driver is allocating the DMA able
memory for RDPQs accordingly.
1) At driver load, set DMA mask to 64 and allocate memory for RDPQs
2) Check if allocated resources for RDPQ are in the same 4GB range
3) If #2 is true, continue with 64 bit DMA and go to #6
4) If #2 is false, then free all the resources from #1
5) Set DMA mask to 32 and allocate RDPQs
6) Proceed with driver loading and other allocations
Link: https://lore.kernel.org/r/1587626596-1044-5-git-send-email-suganath-prabu.subramani@broadcom.com
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Suganath Prabu <suganath-prabu.subramani@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2020-04-23 10:23:15 +03:00
# define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16
2019-05-31 15:14:36 +03:00
2012-11-30 06:14:21 +04:00
/* OEM Specific Flags will come from OEM specific header files */
/*
 * struct Mpi2ManufacturingPage10_t - Manufacturing Config Page 10 layout.
 * Carries the OEM identifier plus OEM/generic flag words used for
 * vendor-specific behavior (e.g. WarpDrive hide/expose policies).
 */
struct Mpi2ManufacturingPage10_t {
	MPI2_CONFIG_PAGE_HEADER Header;		/* 00h */
	U8	OEMIdentifier;			/* 04h */
	U8	Reserved1;			/* 05h */
	U16	Reserved2;			/* 08h */
	U32	Reserved3;			/* 0Ch */
	U32	GenericFlags0;			/* 10h */
	U32	GenericFlags1;			/* 14h */
	U32	Reserved4;			/* 18h */
	U32	OEMSpecificFlags0;		/* 1Ch */
	U32	OEMSpecificFlags1;		/* 20h */
	U32	Reserved5[18];			/* 24h - 60h */
};
/* Miscellaneous options */
struct Mpi2ManufacturingPage11_t {
MPI2_CONFIG_PAGE_HEADER Header ; /* 00h */
__le32 Reserved1 ; /* 04h */
u8 Reserved2 ; /* 08h */
u8 EEDPTagMode ; /* 09h */
u8 Reserved3 ; /* 0Ah */
u8 Reserved4 ; /* 0Bh */
2018-04-24 12:28:41 +03:00
__le32 Reserved5 [ 8 ] ; /* 0Ch-2Ch */
u16 AddlFlags2 ; /* 2Ch */
u8 AddlFlags3 ; /* 2Eh */
u8 Reserved6 ; /* 2Fh */
__le32 Reserved7 [ 7 ] ; /* 30h - 4Bh */
u8 NVMeAbortTO ; /* 4Ch */
scsi: mpt3sas: Register trace buffer based on NVDATA settings
Currently if user wishes to enable the host trace buffer during driver load
time, then user has to load the driver with module parameter
'diag_buffer_enable' set to one.
Alternatively now the user can enable host trace buffer by enabling the
following fields in manufacturing page11 in NVDATA (nvdata xml is used
while building HBA firmware image):
* HostTraceBufferMaxSizeKB - Maximum trace buffer size in KB that host can
allocate,
* HostTraceBufferMinSizeKB - Minimum trace buffer size in KB atleast host
should allocate,
* HostTraceBufferDecrementSizeKB - size by which host can reduce from
buffer size and retry the buffer allocation
when buffer allocation failed with previous
calculated buffer size.
The driver will register the trace buffer automatically without any module
parameter during boot time when above fields are enabled in manufacturing
page11 in HBA firmware.
Driver follows the following algorithm for enabling the host trace buffer
during driver load time:
* If user has loaded the driver with module parameter 'diag_buffer_enable'
set to one, then driver allocates 2MB buffer and registers this buffer
with HBA firmware for capturing the firmware trace logs.
* Else driver reads manufacture page11 data and checks whether
HostTraceBufferMaxSizeKB filed is zero or not?
- If HostTraceBufferMaxSizeKB is non-zero then driver tries to allocate
HostTraceBufferMaxSizeKB size of memory. If the buffer allocation is
successful, then it will register this buffer with HBA firmware, else
in a loop the driver will try again by reducing the current buffer size
with HostTraceBufferDecrementSizeKB size until memory allocation is
successful or buffer size falls below HostTraceBufferMinSizeKB. If the
memory allocation is successful, then the buffer will be registered
with the firmware. Else, if the buffer size falls below the
HostTraceBufferMinSizeKB, then driver won't register trace buffer with
HBA firmware.
- If HostTraceBufferMaxSizeKB is zero, then driver won't register trace
buffer with HBA firmware.
Link: https://lore.kernel.org/r/1568379890-18347-2-git-send-email-sreekanth.reddy@broadcom.com
Signed-off-by: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-09-13 16:04:38 +03:00
u8 NumPerDevEvents ; /* 4Dh */
u8 HostTraceBufferDecrementSizeKB ; /* 4Eh */
u8 HostTraceBufferFlags ; /* 4Fh */
u16 HostTraceBufferMaxSizeKB ; /* 50h */
u16 HostTraceBufferMinSizeKB ; /* 52h */
2019-12-26 14:13:27 +03:00
u8 CoreDumpTOSec ; /* 54h */
2020-11-26 12:43:04 +03:00
u8 TimeSyncInterval ; /* 55h */
2019-12-26 14:13:27 +03:00
u16 Reserved9 ; /* 56h */
__le32 Reserved10 ; /* 58h */
2012-11-30 06:14:21 +04:00
} ;
/**
* struct MPT3SAS_TARGET - starget private hostdata
* @ starget : starget object
* @ sas_address : target sas address
2015-11-11 15:00:28 +03:00
* @ raid_device : raid_device pointer to access volume data
2012-11-30 06:14:21 +04:00
* @ handle : device handle
* @ num_luns : number luns
* @ flags : MPT_TARGET_FLAGS_XXX flags
* @ deleted : target flagged for deletion
* @ tm_busy : target is busy with TM request .
2020-10-27 16:08:34 +03:00
* @ port : hba port entry containing target ' s port number info
2017-10-31 15:32:27 +03:00
* @ sas_dev : The sas_device associated with this target
* @ pcie_dev : The pcie device associated with this target
2012-11-30 06:14:21 +04:00
*/
struct MPT3SAS_TARGET {
struct scsi_target * starget ;
u64 sas_address ;
2015-11-11 15:00:28 +03:00
struct _raid_device * raid_device ;
2012-11-30 06:14:21 +04:00
u16 handle ;
int num_luns ;
u32 flags ;
u8 deleted ;
u8 tm_busy ;
2020-10-27 16:08:34 +03:00
struct hba_port * port ;
2017-10-31 15:32:27 +03:00
struct _sas_device * sas_dev ;
struct _pcie_device * pcie_dev ;
2012-11-30 06:14:21 +04:00
} ;
/*
* per device private data
*/
# define MPT_DEVICE_FLAGS_INIT 0x01
2015-11-11 15:00:28 +03:00
# define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003)
# define MFG_PAGE10_HIDE_ALL_DISKS (0x00)
# define MFG_PAGE10_EXPOSE_ALL_DISKS (0x01)
# define MFG_PAGE10_HIDE_IF_VOL_PRESENT (0x02)
2012-11-30 06:14:21 +04:00
/**
* struct MPT3SAS_DEVICE - sdev private hostdata
* @ sas_target : starget private hostdata
* @ lun : lun number
* @ flags : MPT_DEVICE_XXX flags
* @ configured_lun : lun is configured
* @ block : device is in SDEV_BLOCK state
* @ tlr_snoop_check : flag used in determining whether to disable TLR
* @ eedp_enable : eedp support enable bit
* @ eedp_type : 0 ( type_1 ) , 1 ( type_2 ) , 2 ( type_3 )
* @ eedp_block_length : block size
2017-01-01 20:39:24 +03:00
* @ ata_command_pending : SATL passthrough outstanding for device
2012-11-30 06:14:21 +04:00
*/
struct MPT3SAS_DEVICE {
	struct MPT3SAS_TARGET *sas_target;	/* starget private hostdata */
	unsigned int	lun;			/* lun number */
	u32	flags;				/* MPT_DEVICE_XXX flags */
	u8	configured_lun;			/* lun is configured */
	u8	block;				/* device is in SDEV_BLOCK state */
	u8	tlr_snoop_check;		/* used when deciding to disable TLR */
	u8	ignore_delay_remove;
	/* I/O priority (NCQ) command handling */
	u8	ncq_prio_enable;
	/*
	 * Bug workaround for SATL handling: the mpt2/3sas firmware
	 * doesn't return BUSY or TASK_SET_FULL for subsequent commands
	 * while a SATL pass-through is in operation as the spec
	 * requires; it simply does nothing with them until the pass
	 * through completes, causing them possibly to timeout if the
	 * passthrough is a long executing command (like format or
	 * secure erase).  This variable allows us to do the right
	 * thing while a SATL command is pending.
	 */
	unsigned long ata_command_pending;
};
# define MPT3_CMD_NOT_USED 0x8000 /* free */
# define MPT3_CMD_COMPLETE 0x0001 /* completed */
# define MPT3_CMD_PENDING 0x0002 /* pending */
# define MPT3_CMD_REPLY_VALID 0x0004 /* reply is valid */
# define MPT3_CMD_RESET 0x0008 /* host reset dropped the command */
/**
* struct _internal_cmd - internal commands struct
* @ mutex : mutex
* @ done : completion
* @ reply : reply message pointer
* @ sense : sense data
* @ status : MPT3_CMD_XXX status
* @ smid : system message id
*/
struct _internal_cmd {
	struct mutex	mutex;		/* serializes use of this command */
	struct completion done;		/* completed when the reply arrives */
	void		*reply;		/* reply message pointer */
	void		*sense;		/* sense data */
	u16		status;		/* MPT3_CMD_XXX status bits */
	u16		smid;		/* system message id */
};
/**
* struct _sas_device - attached device information
* @ list : sas device list
* @ starget : starget object
* @ sas_address : device sas address
* @ device_name : retrieved from the SAS IDENTIFY frame .
* @ handle : device handle
* @ sas_address_parent : sas address of parent expander or sas host
* @ enclosure_handle : enclosure handle
* @ enclosure_logical_id : enclosure logical identifier
* @ volume_handle : volume handle ( valid when hidden raid member )
* @ volume_wwid : volume unique identifier
* @ device_info : bitfield provides detailed info about the device
* @ id : target id
* @ channel : target channel
* @ slot : slot number
* @ phy : phy identifier provided in sas device page 0
* @ responding : used in _scsih_sas_device_mark_responding
2014-09-12 14:05:26 +04:00
* @ fast_path : fast path feature enable bit
* @ pfa_led_on : flag for PFA LED status
2015-06-30 09:54:49 +03:00
* @ pend_sas_rphy_add : flag to check if device is in sas_rphy_add ( )
* addition routine .
2017-10-10 16:11:20 +03:00
* @ chassis_slot : chassis slot
* @ is_chassis_slot_valid : chassis slot valid or not
2020-10-27 16:08:34 +03:00
* @ port : hba port entry containing device ' s port number info
2020-10-27 16:08:40 +03:00
* @ rphy : device ' s sas_rphy address used to identify this device structure in
* target_alloc callback function
2012-11-30 06:14:21 +04:00
*/
struct _sas_device {
struct list_head list ;
struct scsi_target * starget ;
u64 sas_address ;
u64 device_name ;
u16 handle ;
u64 sas_address_parent ;
u16 enclosure_handle ;
u64 enclosure_logical_id ;
u16 volume_handle ;
u64 volume_wwid ;
u32 device_info ;
int id ;
int channel ;
u16 slot ;
u8 phy ;
u8 responding ;
u8 fast_path ;
2014-09-12 14:05:26 +04:00
u8 pfa_led_on ;
2015-06-30 09:54:49 +03:00
u8 pend_sas_rphy_add ;
2015-06-30 09:54:52 +03:00
u8 enclosure_level ;
2017-10-10 16:11:20 +03:00
u8 chassis_slot ;
u8 is_chassis_slot_valid ;
2016-07-28 07:45:51 +03:00
u8 connector_name [ 5 ] ;
2015-11-11 15:00:30 +03:00
struct kref refcount ;
2020-10-27 16:08:34 +03:00
struct hba_port * port ;
2020-10-27 16:08:40 +03:00
struct sas_rphy * rphy ;
2012-11-30 06:14:21 +04:00
} ;
2015-11-11 15:00:30 +03:00
static inline void sas_device_get ( struct _sas_device * s )
{
kref_get ( & s - > refcount ) ;
}
/**
 * sas_device_free - kref release callback freeing a sas_device
 * @r: embedded kref of the _sas_device being released
 *
 * Invoked by kref_put() when the last reference is dropped.
 */
static inline void sas_device_free(struct kref *r)
{
	kfree(container_of(r, struct _sas_device, refcount));
}
static inline void sas_device_put ( struct _sas_device * s )
{
kref_put ( & s - > refcount , sas_device_free ) ;
}
2017-10-31 15:32:27 +03:00
/*
* struct _pcie_device - attached PCIe device information
* @ list : pcie device list
* @ starget : starget object
* @ wwid : device WWID
* @ handle : device handle
* @ device_info : bitfield provides detailed info about the device
* @ id : target id
* @ channel : target channel
* @ slot : slot number
* @ port_num : port number
* @ responding : used in _scsih_pcie_device_mark_responding
* @ fast_path : fast path feature enable bit
* @ nvme_mdts : MaximumDataTransferSize from PCIe Device Page 2 for
* NVMe device only
* @ enclosure_handle : enclosure handle
* @ enclosure_logical_id : enclosure logical identifier
* @ enclosure_level : The level of device ' s enclosure from the controller
* @ connector_name : ASCII value of the Connector ' s name
* @ serial_number : pointer of serial number string allocated runtime
2019-08-03 16:59:51 +03:00
* @ access_status : Device ' s Access Status
2019-12-26 14:13:25 +03:00
* @ shutdown_latency : NVMe device ' s RTD3 Entry Latency
2017-10-31 15:32:27 +03:00
* @ refcount : reference count for deletion
*/
struct _pcie_device {
struct list_head list ;
struct scsi_target * starget ;
u64 wwid ;
u16 handle ;
u32 device_info ;
int id ;
int channel ;
u16 slot ;
u8 port_num ;
u8 responding ;
u8 fast_path ;
u32 nvme_mdts ;
u16 enclosure_handle ;
u64 enclosure_logical_id ;
u8 enclosure_level ;
u8 connector_name [ 4 ] ;
u8 * serial_number ;
2018-04-24 12:28:41 +03:00
u8 reset_timeout ;
2019-08-03 16:59:51 +03:00
u8 access_status ;
2019-12-26 14:13:25 +03:00
u16 shutdown_latency ;
2017-10-31 15:32:27 +03:00
struct kref refcount ;
} ;
/**
 * pcie_device_get - Increment the pcie device reference count
 * @p: pcie_device object
 *
 * Whenever this function is called it will increment the
 * reference count of the pcie device for which it is called.
 */
static inline void pcie_device_get(struct _pcie_device *p)
{
	kref_get(&p->refcount);
}
/**
 * pcie_device_free - Release the pcie device object
 * @r: kref object embedded in struct _pcie_device
 *
 * Frees the pcie device object. It will be called by kref_put() when the
 * reference count reaches zero.
 */
static inline void pcie_device_free(struct kref *r)
{
	kfree(container_of(r, struct _pcie_device, refcount));
}
/**
 * pcie_device_put - Decrement the pcie device reference count
 * @p: pcie_device object
 *
 * Whenever this function is called it will decrement the
 * reference count of the pcie device for which it is called.
 *
 * When the reference count reaches zero, pcie_device_free() is called on
 * the pcie_device object.
 */
static inline void pcie_device_put(struct _pcie_device *p)
{
	kref_put(&p->refcount, pcie_device_free);
}
2012-11-30 06:14:21 +04:00
/**
* struct _raid_device - raid volume link list
* @ list : sas device list
* @ starget : starget object
* @ sdev : scsi device struct ( volumes are single lun )
* @ wwid : unique identifier for the volume
* @ handle : device handle
2015-11-11 15:00:28 +03:00
* @ block_size : Block size of the volume
2012-11-30 06:14:21 +04:00
* @ id : target id
* @ channel : target channel
* @ volume_type : the raid level
* @ device_info : bitfield provides detailed info about the hidden components
* @ num_pds : number of hidden raid components
* @ responding : used in _scsih_raid_device_mark_responding
* @ percent_complete : resync percent complete
2015-11-11 15:00:28 +03:00
* @ direct_io_enabled : Whether direct io to PDs are allowed or not
* @ stripe_exponent : X where 2 powX is the stripe sz in blocks
* @ block_exponent : X where 2 powX is the block sz in bytes
* @ max_lba : Maximum number of LBA in the volume
* @ stripe_sz : Stripe Size of the volume
* @ device_info : Device info of the volume member disk
* @ pd_handle : Array of handles of the physical drives for direct I / O in le16
2012-11-30 06:14:21 +04:00
*/
# define MPT_MAX_WARPDRIVE_PDS 8
struct _raid_device {
struct list_head list ;
struct scsi_target * starget ;
struct scsi_device * sdev ;
u64 wwid ;
u16 handle ;
2015-11-11 15:00:28 +03:00
u16 block_sz ;
2012-11-30 06:14:21 +04:00
int id ;
int channel ;
u8 volume_type ;
u8 num_pds ;
u8 responding ;
u8 percent_complete ;
2015-11-11 15:00:28 +03:00
u8 direct_io_enabled ;
u8 stripe_exponent ;
u8 block_exponent ;
u64 max_lba ;
u32 stripe_sz ;
2012-11-30 06:14:21 +04:00
u32 device_info ;
2015-11-11 15:00:28 +03:00
u16 pd_handle [ MPT_MAX_WARPDRIVE_PDS ] ;
2012-11-30 06:14:21 +04:00
} ;
/**
 * struct _boot_device - boot device info
 * @channel: sas, raid, or pcie channel
 * @device: holds pointer for struct _sas_device, struct _raid_device or
 *	struct _pcie_device
 */
struct _boot_device {
	int channel;
	void *device;
};
/**
* struct _sas_port - wide / narrow sas port information
* @ port_list : list of ports belonging to expander
* @ num_phys : number of phys belonging to this port
* @ remote_identify : attached device identification
* @ rphy : sas transport rphy object
* @ port : sas transport wide / narrow port object
2020-10-27 16:08:34 +03:00
* @ hba_port : hba port entry containing port ' s port number info
2012-11-30 06:14:21 +04:00
* @ phy_list : _sas_phy list objects belonging to this port
*/
struct _sas_port {
struct list_head port_list ;
u8 num_phys ;
struct sas_identify remote_identify ;
struct sas_rphy * rphy ;
struct sas_port * port ;
2020-10-27 16:08:34 +03:00
struct hba_port * hba_port ;
2012-11-30 06:14:21 +04:00
struct list_head phy_list ;
} ;
/**
 * struct _sas_phy - phy information
 * @port_siblings: list of phys belonging to a port
 * @identify: phy identification
 * @remote_identify: attached device identification
 * @phy: sas transport phy object
 * @phy_id: unique phy id
 * @handle: device handle for this phy
 * @attached_handle: device handle for attached device
 * @phy_belongs_to_port: port has been created for this phy
 * @hba_vphy: flag relating this phy to a vSES virtual phy
 *	(NOTE(review): exact semantics not visible in this file — confirm)
 * @port: hba port entry containing port number info
 */
struct _sas_phy {
	struct list_head port_siblings;
	struct sas_identify identify;
	struct sas_identify remote_identify;
	struct sas_phy *phy;
	u8	phy_id;
	u16	handle;
	u16	attached_handle;
	u8	phy_belongs_to_port;
	u8	hba_vphy;
	struct hba_port *port;
};
/**
* struct _sas_node - sas_host / expander information
* @ list : list of expanders
* @ parent_dev : parent device class
* @ num_phys : number phys belonging to this sas_host / expander
* @ sas_address : sas address of this sas_host / expander
* @ handle : handle for this sas_host / expander
* @ sas_address_parent : sas address of parent expander or sas host
* @ enclosure_handle : handle for this a member of an enclosure
* @ device_info : bitwise defining capabilities of this sas_host / expander
* @ responding : used in _scsih_expander_device_mark_responding
* @ phy : a list of phys that make up this sas_host / expander
* @ sas_port_list : list of ports attached to this sas_host / expander
2020-10-27 16:08:34 +03:00
* @ port : hba port entry containing node ' s port number info
2020-10-27 16:08:42 +03:00
* @ rphy : sas_rphy object of this expander
2012-11-30 06:14:21 +04:00
*/
struct _sas_node {
struct list_head list ;
struct device * parent_dev ;
u8 num_phys ;
u64 sas_address ;
u16 handle ;
u64 sas_address_parent ;
u16 enclosure_handle ;
u64 enclosure_logical_id ;
u8 responding ;
2020-10-27 16:08:34 +03:00
struct hba_port * port ;
2012-11-30 06:14:21 +04:00
struct _sas_phy * phy ;
struct list_head sas_port_list ;
2020-10-27 16:08:42 +03:00
struct sas_rphy * rphy ;
2012-11-30 06:14:21 +04:00
} ;
2018-04-24 12:28:38 +03:00
/**
 * struct _enclosure_node - enclosure information
 * @list: list of enclosures
 * @pg0: cached copy of SAS Enclosure Page 0 for this enclosure
 */
struct _enclosure_node {
	struct list_head list;
	Mpi2SasEnclosurePage0_t pg0;
};
2012-11-30 06:14:21 +04:00
/**
* enum reset_type - reset state
* @ FORCE_BIG_HAMMER : issue diagnostic reset
* @ SOFT_RESET : issue message_unit_reset , if fails to to big hammer
*/
enum reset_type {
FORCE_BIG_HAMMER ,
SOFT_RESET ,
} ;
2017-10-31 15:32:28 +03:00
/**
 * struct pcie_sg_list - PCIe SGL buffer (contiguous per I/O)
 * @pcie_sgl: PCIe native SGL for NVMe devices
 * @pcie_sgl_dma: physical address
 */
struct pcie_sg_list {
	void *pcie_sgl;
	dma_addr_t pcie_sgl_dma;
};
2012-11-30 06:14:21 +04:00
/**
 * struct chain_tracker - firmware chain tracker
 * @chain_buffer: chain buffer
 * @chain_buffer_dma: physical address
 *
 * NOTE: the old @tracker_list member (ioc->free_chain_list) no longer
 * exists; trackers are now reached through struct chain_lookup.
 */
struct chain_tracker {
	void *chain_buffer;
	dma_addr_t chain_buffer_dma;
};
/**
 * struct chain_lookup - per-smid chain tracker lookup
 * @chains_per_smid: array of chain trackers available for one smid
 * @chain_offset: atomic index into @chains_per_smid
 *	(NOTE(review): presumably the next free tracker — confirm at users)
 */
struct chain_lookup {
	struct chain_tracker *chains_per_smid;
	atomic_t chain_offset;
};
/**
* struct scsiio_tracker - scsi mf request tracker
* @ smid : system message id
* @ cb_idx : callback index
2015-11-11 15:00:28 +03:00
* @ direct_io : To indicate whether I / O is direct ( WARPDRIVE )
2018-01-04 15:57:11 +03:00
* @ chain_list : list of associated firmware chain tracker
2016-01-28 09:37:06 +03:00
* @ msix_io : IO ' s msix
2012-11-30 06:14:21 +04:00
*/
struct scsiio_tracker {
u16 smid ;
2019-05-31 15:14:39 +03:00
struct scsi_cmnd * scmd ;
2012-11-30 06:14:21 +04:00
u8 cb_idx ;
2015-11-11 15:00:28 +03:00
u8 direct_io ;
2017-10-31 15:32:28 +03:00
struct pcie_sg_list pcie_sg_list ;
2012-11-30 06:14:21 +04:00
struct list_head chain_list ;
2016-01-28 09:37:06 +03:00
u16 msix_io ;
2012-11-30 06:14:21 +04:00
} ;
/**
 * struct request_tracker - firmware request tracker
 * @smid: system message id
 * @cb_idx: callback index
 * @tracker_list: list of free request (ioc->free_list)
 */
struct request_tracker {
	u16 smid;
	u8 cb_idx;
	struct list_head tracker_list;
};
/**
 * struct _tr_list - target reset list
 * @list: list entry
 * @handle: device handle
 * @state: state machine
 */
struct _tr_list {
	struct list_head list;
	u16 handle;
	u16 state;
};
2016-01-28 09:37:02 +03:00
/**
 * struct _sc_list - delayed SAS_IO_UNIT_CONTROL message list
 * @list: list entry
 * @handle: device handle
 */
struct _sc_list {
	struct list_head list;
	u16 handle;
};
/**
* struct _event_ack_list - delayed event acknowledgment list
* @ Event : Event ID
* @ EventContext : used to track the event uniquely
*/
struct _event_ack_list {
struct list_head list ;
2018-04-24 12:28:30 +03:00
U16 Event ;
U32 EventContext ;
2016-01-28 09:37:02 +03:00
} ;
2012-11-30 06:14:21 +04:00
/**
* struct adapter_reply_queue - the reply queue struct
* @ ioc : per adapter object
* @ msix_index : msix index into vector table
* @ vector : irq vector
* @ reply_post_host_index : head index in the pool where FW completes IO
* @ reply_post_free : reply post base virt address
* @ name : the name registered to request_irq ( )
* @ busy : isr is actively processing replies on another cpu
scsi: mpt3sas: Irq poll to avoid CPU hard lockups
Issue Description:
We have seen cpu lock up issue from fields if system has greater (more than
96) logical cpu count. SAS3.0 controller (Invader series) supports at max
96 msix vector and SAS3.5 product (Ventura) supports at max 128 msix
vectors.
This may be a generic issue (if PCI device supports completion on multiple
reply queues). Let me explain it w.r.t to mpt3sas supported h/w just to
simplify the problem and possible changes to handle such issues. IT HBA
(mpt3sas) supports multiple reply queues in completion path. Driver creates
MSI-x vectors for controller as "min of (FW supported Reply queue, Logical
CPUs)". If submitter is not interrupted via completion on same CPU, there
is a loop in the IO path. This behavior can cause hard/soft CPU lockups, IO
timeout, system sluggish etc.
Example - one CPU (e.g. CPU A) is busy submitting the IOs and another CPU
(e.g. CPU B) is busy with processing the corresponding IO's reply
descriptors from reply descriptor queue upon receiving the interrupts from
HBA. If the CPU A is continuously pumping the IOs then always CPU B (which
is executing the ISR) will see the valid reply descriptors in the reply
descriptor queue and it will be continuously processing those reply
descriptor in a loop without quitting the ISR handler.
Mpt3sas driver will exit ISR handler if it finds unused reply descriptor in
the reply descriptor queue. Since CPU A will be continuously sending the
IOs, CPU B may always see a valid reply descriptor (posted by HBA Firmware
after processing the IO) in the reply descriptor queue. In worst case,
driver will not quit from this loop in the ISR handler. Eventually, CPU
lockup will be detected by watchdog.
Above mentioned behavior is not common if "rq_affinity" set to 2 or
affinity_hint is honored by irqbalance as "exact". If rq_affinity is set
to 2, submitter will be always interrupted via completion on same CPU. If
irqbalance is using "exact" policy, interrupt will be delivered to
submitter CPU.
If CPU counts to MSI-X vectors (reply descriptor Queues) count ratio is not
1:1, we still have exposure of issue explained above and for that we don't
have any solution.
Exposure of soft/hard lockup if CPU count is more than MSI-x supported by
device.
If CPUs count to MSI-x vectors count ratio is not 1:1, (Other way, if CPU
counts to MSI-x vector count ratio is something like X:1, where X > 1) then
'exact' irqbalance policy OR rq_affinity = 2 won't help to avoid CPU
hard/soft lockups. There won't be any one to one mapping between CPU to
MSI-x vector instead one MSI-x interrupt (or reply descriptor queue) is
shared with group/set of CPUs and there is a possibility of having a loop
in the IO path within that CPU group and may observe lockups.
For example: Consider a system having two NUMA nodes and each node having
four logical CPUs and also consider that number of MSI-x vectors enabled on
the HBA is two, then CPUs count to MSI-x vector count ratio as 4:1. e.g.
MSIx vector 0 is affinity to CPU 0, CPU 1, CPU 2 & CPU 3 of NUMA node 0 and
MSI-x vector 1 is affinity to CPU 4, CPU 5, CPU 6 & CPU 7 of NUMA node 1.
numactl --hardware
available: 2 nodes (0-1)
node 0 cpus: 0 1 2 3 --> MSI-x 0
node 0 size: 65536 MB
node 0 free: 63176 MB
node 1 cpus: 4 5 6 7 -->MSI-x 1
node 1 size: 65536 MB
node 1 free: 63176 MB
Assume that user started an application which uses all the CPUs of NUMA
node 0 for issuing the IOs. Only one CPU from affinity list (it can be any
cpu since this behavior depends upon irqbalance) CPU0 will receive the
interrupts from MSIx vector 0 for all the IOs. Eventually, CPU 0 IO
submission percentage will be decreasing and ISR processing percentage will
be increasing as it is more busy with processing the interrupts. Gradually
IO submission percentage on CPU 0 will be zero and it's ISR processing
percentage will be 100 percentage as IO loop has already formed within the
NUMA node 0, i.e. CPU 1, CPU 2 & CPU 3 will be continuously busy with
submitting the heavy IOs and only CPU 0 is busy in the ISR path as it
always find the valid reply descriptor in the reply descriptor
queue. Eventually, we will observe the hard lockup here.
Chances of occurring of hard/soft lockups are directly proportional to
value of X. If value of X is high, then chances of observing CPU lockups is
high.
Solution: Use IRQ poll interface defined in " irq_poll.c". mpt3sas driver
will execute ISR routine in Softirq context and it will always quit the
loop based on budget provided in IRQ poll interface.
In these scenarios (i.e. where CPUs count to MSI-X vectors count ratio is
X:1 (where X > 1)), IRQ poll interface will avoid CPU hard lockups due to
voluntary exit from the reply queue processing based on budget. Note -
Only one MSI-x vector is busy doing processing.
Irqstat output:
IRQs / 1 second(s)
IRQ# TOTAL NODE0 NODE1 NODE2 NODE3 NAME
44 122871 122871 0 0 0 IR-PCI-MSI-edge mpt3sas0-msix0
45 0 0 0 0 0 IR-PCI-MSI-edge mpt3sas0-msix1
We use this approach only if cpu count is more than FW supported MSI-x
vector
Signed-off-by: Suganath Prabu <suganath-prabu.subramani@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-02-15 10:40:27 +03:00
* @ os_irq : irq number
* @ irqpoll : irq_poll object
* @ irq_poll_scheduled : Tells whether irq poll is scheduled or not
2012-11-30 06:14:21 +04:00
* @ list : this list
*/
struct adapter_reply_queue {
struct MPT3SAS_ADAPTER * ioc ;
u8 msix_index ;
u32 reply_post_host_index ;
Mpi2ReplyDescriptorsUnion_t * reply_post_free ;
char name [ MPT_NAME_LENGTH ] ;
atomic_t busy ;
scsi: mpt3sas: Irq poll to avoid CPU hard lockups
Issue Description:
We have seen cpu lock up issue from fields if system has greater (more than
96) logical cpu count. SAS3.0 controller (Invader series) supports at max
96 msix vector and SAS3.5 product (Ventura) supports at max 128 msix
vectors.
This may be a generic issue (if PCI device supports completion on multiple
reply queues). Let me explain it w.r.t to mpt3sas supported h/w just to
simplify the problem and possible changes to handle such issues. IT HBA
(mpt3sas) supports multiple reply queues in completion path. Driver creates
MSI-x vectors for controller as "min of (FW supported Reply queue, Logical
CPUs)". If submitter is not interrupted via completion on same CPU, there
is a loop in the IO path. This behavior can cause hard/soft CPU lockups, IO
timeout, system sluggish etc.
Example - one CPU (e.g. CPU A) is busy submitting the IOs and another CPU
(e.g. CPU B) is busy with processing the corresponding IO's reply
descriptors from reply descriptor queue upon receiving the interrupts from
HBA. If the CPU A is continuously pumping the IOs then always CPU B (which
is executing the ISR) will see the valid reply descriptors in the reply
descriptor queue and it will be continuously processing those reply
descriptor in a loop without quitting the ISR handler.
Mpt3sas driver will exit ISR handler if it finds unused reply descriptor in
the reply descriptor queue. Since CPU A will be continuously sending the
IOs, CPU B may always see a valid reply descriptor (posted by HBA Firmware
after processing the IO) in the reply descriptor queue. In worst case,
driver will not quit from this loop in the ISR handler. Eventually, CPU
lockup will be detected by watchdog.
Above mentioned behavior is not common if "rq_affinity" set to 2 or
affinity_hint is honored by irqbalance as "exact". If rq_affinity is set
to 2, submitter will be always interrupted via completion on same CPU. If
irqbalance is using "exact" policy, interrupt will be delivered to
submitter CPU.
If CPU counts to MSI-X vectors (reply descriptor Queues) count ratio is not
1:1, we still have exposure of issue explained above and for that we don't
have any solution.
Exposure of soft/hard lockup if CPU count is more than MSI-x supported by
device.
If CPUs count to MSI-x vectors count ratio is not 1:1, (Other way, if CPU
counts to MSI-x vector count ratio is something like X:1, where X > 1) then
'exact' irqbalance policy OR rq_affinity = 2 won't help to avoid CPU
hard/soft lockups. There won't be any one to one mapping between CPU to
MSI-x vector instead one MSI-x interrupt (or reply descriptor queue) is
shared with group/set of CPUs and there is a possibility of having a loop
in the IO path within that CPU group and may observe lockups.
For example: Consider a system having two NUMA nodes and each node having
four logical CPUs and also consider that number of MSI-x vectors enabled on
the HBA is two, then CPUs count to MSI-x vector count ratio as 4:1. e.g.
MSIx vector 0 is affinity to CPU 0, CPU 1, CPU 2 & CPU 3 of NUMA node 0 and
MSI-x vector 1 is affinity to CPU 4, CPU 5, CPU 6 & CPU 7 of NUMA node 1.
numactl --hardware
available: 2 nodes (0-1)
node 0 cpus: 0 1 2 3 --> MSI-x 0
node 0 size: 65536 MB
node 0 free: 63176 MB
node 1 cpus: 4 5 6 7 -->MSI-x 1
node 1 size: 65536 MB
node 1 free: 63176 MB
Assume that user started an application which uses all the CPUs of NUMA
node 0 for issuing the IOs. Only one CPU from affinity list (it can be any
cpu since this behavior depends upon irqbalance) CPU0 will receive the
interrupts from MSIx vector 0 for all the IOs. Eventually, CPU 0 IO
submission percentage will be decreasing and ISR processing percentage will
be increasing as it is more busy with processing the interrupts. Gradually
IO submission percentage on CPU 0 will be zero and it's ISR processing
percentage will be 100 percentage as IO loop has already formed within the
NUMA node 0, i.e. CPU 1, CPU 2 & CPU 3 will be continuously busy with
submitting the heavy IOs and only CPU 0 is busy in the ISR path as it
always find the valid reply descriptor in the reply descriptor
queue. Eventually, we will observe the hard lockup here.
Chances of occurring of hard/soft lockups are directly proportional to
value of X. If value of X is high, then chances of observing CPU lockups is
high.
Solution: Use IRQ poll interface defined in " irq_poll.c". mpt3sas driver
will execute ISR routine in Softirq context and it will always quit the
loop based on budget provided in IRQ poll interface.
In these scenarios (i.e. where CPUs count to MSI-X vectors count ratio is
X:1 (where X > 1)), IRQ poll interface will avoid CPU hard lockups due to
voluntary exit from the reply queue processing based on budget. Note -
Only one MSI-x vector is busy doing processing.
Irqstat output:
IRQs / 1 second(s)
IRQ# TOTAL NODE0 NODE1 NODE2 NODE3 NAME
44 122871 122871 0 0 0 IR-PCI-MSI-edge mpt3sas0-msix0
45 0 0 0 0 0 IR-PCI-MSI-edge mpt3sas0-msix1
We use this approach only if cpu count is more than FW supported MSI-x
vector
Signed-off-by: Suganath Prabu <suganath-prabu.subramani@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-02-15 10:40:27 +03:00
u32 os_irq ;
struct irq_poll irqpoll ;
bool irq_poll_scheduled ;
bool irq_line_enable ;
2012-11-30 06:14:21 +04:00
struct list_head list ;
} ;
typedef void ( * MPT_ADD_SGE ) ( void * paddr , u32 flags_length , dma_addr_t dma_addr ) ;
/* SAS3.0 support */
typedef int ( * MPT_BUILD_SG_SCMD ) ( struct MPT3SAS_ADAPTER * ioc ,
2017-10-31 15:32:28 +03:00
struct scsi_cmnd * scmd , u16 smid , struct _pcie_device * pcie_device ) ;
2012-11-30 06:14:21 +04:00
typedef void ( * MPT_BUILD_SG ) ( struct MPT3SAS_ADAPTER * ioc , void * psge ,
dma_addr_t data_out_dma , size_t data_out_sz ,
dma_addr_t data_in_dma , size_t data_in_sz ) ;
typedef void ( * MPT_BUILD_ZERO_LEN_SGE ) ( struct MPT3SAS_ADAPTER * ioc ,
void * paddr ) ;
2017-10-31 15:32:28 +03:00
/* SAS3.5 support */
typedef void ( * NVME_BUILD_PRP ) ( struct MPT3SAS_ADAPTER * ioc , u16 smid ,
Mpi26NVMeEncapsulatedRequest_t * nvme_encap_request ,
dma_addr_t data_out_dma , size_t data_out_sz , dma_addr_t data_in_dma ,
size_t data_in_sz ) ;
2016-10-26 11:04:40 +03:00
/* To support atomic and non atomic descriptors*/
typedef void ( * PUT_SMID_IO_FP_HIP ) ( struct MPT3SAS_ADAPTER * ioc , u16 smid ,
u16 funcdep ) ;
typedef void ( * PUT_SMID_DEFAULT ) ( struct MPT3SAS_ADAPTER * ioc , u16 smid ) ;
2018-12-07 10:28:33 +03:00
typedef u32 ( * BASE_READ_REG ) ( const volatile void __iomem * addr ) ;
2019-05-31 15:14:38 +03:00
/*
* To get high iops reply queue ' s msix index when high iops mode is enabled
* else get the msix index of general reply queues .
*/
typedef u8 ( * GET_MSIX_INDEX ) ( struct MPT3SAS_ADAPTER * ioc ,
struct scsi_cmnd * scmd ) ;
2012-11-30 06:14:21 +04:00
/* IOC Facts and Port Facts converted from little endian to cpu */
union mpi3_version_union {
	MPI2_VERSION_STRUCT Struct;
	u32 Word;
};

/* Cached, cpu-endian copy of the firmware's IOC Facts reply. */
struct mpt3sas_facts {
	u16	MsgVersion;
	u16	HeaderVersion;
	u8	IOCNumber;
	u8	VP_ID;
	u8	VF_ID;
	u16	IOCExceptions;
	u16	IOCStatus;
	u32	IOCLogInfo;
	u8	MaxChainDepth;
	u8	WhoInit;
	u8	NumberOfPorts;
	u8	MaxMSIxVectors;
	u16	RequestCredit;
	u16	ProductID;
	u32	IOCCapabilities;
	union mpi3_version_union FWVersion;
	u16	IOCRequestFrameSize;
	u16	IOCMaxChainSegmentSize;
	u16	MaxInitiators;
	u16	MaxTargets;
	u16	MaxSasExpanders;
	u16	MaxEnclosures;
	u16	ProtocolFlags;
	u16	HighPriorityCredit;
	u16	MaxReplyDescriptorPostQueueDepth;
	u8	ReplyFrameSize;
	u8	MaxVolumes;
	u16	MaxDevHandle;
	u16	MaxPersistentEntries;
	u16	MinDevHandle;
	u8	CurrentHostPageSize;
};

/* Cached, cpu-endian copy of the firmware's Port Facts reply. */
struct mpt3sas_port_facts {
	u8	PortNumber;
	u8	VP_ID;
	u8	VF_ID;
	u8	PortType;
	u16	MaxPostedCmdBuffers;
};
2014-09-12 14:05:31 +04:00
/**
 * struct reply_post_struct - one reply post free pool
 * @reply_post_free: reply post base virt address
 * @reply_post_free_dma: physical address of the pool
 */
struct reply_post_struct {
	Mpi2ReplyDescriptorsUnion_t *reply_post_free;
	dma_addr_t reply_post_free_dma;
};
2020-10-27 16:08:43 +03:00
/**
 * struct virtual_phy - vSES phy structure
 * @list: list entry (chained on a hba_port's vphys_list)
 * @sas_address: SAS Address of vSES device
 * @phy_mask: vSES device's phy number
 * @flags: flags used to manage this structure
 */
struct virtual_phy {
	struct list_head list;
	u64	sas_address;
	u32	phy_mask;
	u8	flags;
};

#define MPT_VPHY_FLAG_DIRTY_PHY	0x01
2020-10-27 16:08:34 +03:00
/**
* struct hba_port - Saves each HBA ' s Wide / Narrow port info
* @ sas_address : sas address of this wide / narrow port ' s attached device
* @ phy_mask : HBA PHY ' s belonging to this port
* @ port_id : port number
* @ flags : hba port flags
2020-10-27 16:08:43 +03:00
* @ vphys_mask : mask of vSES devices Phy number
* @ vphys_list : list containing vSES device structures
2020-10-27 16:08:34 +03:00
*/
struct hba_port {
struct list_head list ;
u64 sas_address ;
u32 phy_mask ;
u8 port_id ;
u8 flags ;
2020-10-27 16:08:43 +03:00
u32 vphys_mask ;
struct list_head vphys_list ;
2020-10-27 16:08:34 +03:00
} ;
/* hba port flags */
# define HBA_PORT_FLAG_DIRTY_PORT 0x01
# define HBA_PORT_FLAG_NEW_PORT 0x02
# define MULTIPATH_DISABLED_PORT_ID 0xFF
2021-02-04 06:37:23 +03:00
/**
 * struct htb_rel_query - diagnostic buffer release reason
 * @buffer_rel_condition: Release condition ioctl/sysfs/reset
 * @reserved: reserved for future use
 * @trigger_type: Master/Event/scsi/MPI
 * @trigger_info_dwords: Data corresponding to trigger type
 */
struct htb_rel_query {
	u16	buffer_rel_condition;
	u16	reserved;
	u32	trigger_type;
	u32	trigger_info_dwords[2];
};

/* Buffer_rel_condition bit fields */

/* Bit 0 - Diag Buffer not Released */
#define MPT3_DIAG_BUFFER_NOT_RELEASED	(0x00)
/* Bit 0 - Diag Buffer Released */
#define MPT3_DIAG_BUFFER_RELEASED	(0x01)

/*
 * Bit 1 - Diag Buffer Released by IOCTL,
 * This bit is valid only if Bit 0 is one
 */
#define MPT3_DIAG_BUFFER_REL_IOCTL	(0x02 | MPT3_DIAG_BUFFER_RELEASED)

/*
 * Bit 2 - Diag Buffer Released by Trigger,
 * This bit is valid only if Bit 0 is one
 */
#define MPT3_DIAG_BUFFER_REL_TRIGGER	(0x04 | MPT3_DIAG_BUFFER_RELEASED)

/*
 * Bit 3 - Diag Buffer Released by SysFs,
 * This bit is valid only if Bit 0 is one
 */
#define MPT3_DIAG_BUFFER_REL_SYSFS	(0x08 | MPT3_DIAG_BUFFER_RELEASED)

/* DIAG RESET Master trigger flags */
#define MPT_DIAG_RESET_ISSUED_BY_DRIVER	0x00000000
#define MPT_DIAG_RESET_ISSUED_BY_USER	0x00000001
2012-11-30 06:14:21 +04:00
typedef void ( * MPT3SAS_FLUSH_RUNNING_CMDS ) ( struct MPT3SAS_ADAPTER * ioc ) ;
/**
* struct MPT3SAS_ADAPTER - per adapter struct
* @ list : ioc_list
* @ shost : shost object
* @ id : unique adapter id
* @ cpu_count : number online cpus
* @ name : generic ioc string
* @ tmp_string : tmp string used for logging
* @ pdev : pci pdev object
* @ pio_chip : physical io register space
* @ chip : memory mapped register space
* @ chip_phys : physical addrss prior to mapping
* @ logging_level : see mpt3sas_debug . h
* @ fwfault_debug : debuging FW timeouts
* @ ir_firmware : IR firmware present
* @ bars : bitmask of BAR ' s that must be configured
* @ mask_interrupts : ignore interrupt
2017-10-31 15:32:27 +03:00
* @ pci_access_mutex : Mutex to synchronize ioctl , sysfs show path and
* pci resource handling
2012-11-30 06:14:21 +04:00
* @ fault_reset_work_q_name : fw fault work queue
* @ fault_reset_work_q : " "
* @ fault_reset_work : " "
* @ firmware_event_name : fw event work queue
* @ firmware_event_thread : " "
* @ fw_event_lock :
* @ fw_event_list : list of fw events
2020-07-30 11:03:45 +03:00
* @ current_evet : current processing firmware event
* @ fw_event_cleanup : set to one while cleaning up the fw events
2012-11-30 06:14:21 +04:00
* @ aen_event_read_flag : event log was read
* @ broadcast_aen_busy : broadcast aen waiting to be serviced
* @ shost_recovery : host reset in progress
* @ ioc_reset_in_progress_lock :
* @ ioc_link_reset_in_progress : phy / hard reset in progress
* @ ignore_loginfos : ignore loginfos during task management
* @ remove_host : flag for when driver unloads , to avoid sending dev resets
* @ pci_error_recovery : flag to prevent ioc access until slot reset completes
* @ wait_for_discovery_to_complete : flag set at driver load time when
* waiting on reporting devices
* @ is_driver_loading : flag set at driver load time
* @ port_enable_failed : flag set when port enable has failed
* @ start_scan : flag set from scan_start callback , cleared from _mpt3sas_fw_work
* @ start_scan_failed : means port enable failed , return ' s the ioc_status
* @ msix_enable : flag indicating msix is enabled
* @ msix_vector_count : number msix vectors
* @ cpu_msix_table : table for mapping cpus to msix index
* @ cpu_msix_table_sz : table size
2019-02-15 10:40:28 +03:00
* @ total_io_cnt : Gives total IO count , used to load balance the interrupts
2019-12-26 14:13:28 +03:00
* @ ioc_coredump_loop : will have non - zero value when FW is in CoreDump state
2020-11-26 12:43:04 +03:00
* @ timestamp_update_count : Counter to fire timeSync command
* time_sync_interval : Time sync interval read from man page 11
2019-05-31 15:14:38 +03:00
* @ high_iops_outstanding : used to load balance the interrupts
* within high iops reply queues
2019-02-15 10:40:28 +03:00
* @ msix_load_balance : Enables load balancing of interrupts across
* the multiple MSIXs
2012-11-30 06:14:21 +04:00
* @ schedule_dead_ioc_flush_running_cmds : callback to flush pending commands
2019-02-15 10:40:29 +03:00
* @ thresh_hold : Max number of reply descriptors processed
* before updating Host Index
2019-08-03 16:59:53 +03:00
* @ drv_support_bitmap : driver ' s supported feature bit map
scsi: mpt3sas: Handle RDPQ DMA allocation in same 4G region
For INVADER_SERIES, each set of 8 reply queues (0 - 7, 8 - 15,..), and for
VENTURA_SERIES, each set of 16 reply queues (0 - 15, 16 - 31,..) need to be
within the same 4 GB boundary. Driver uses limitation of VENTURA_SERIES to
manage INVADER_SERIES as well. The driver is allocating the DMA able
memory for RDPQs accordingly.
1) At driver load, set DMA mask to 64 and allocate memory for RDPQs
2) Check if allocated resources for RDPQ are in the same 4GB range
3) If #2 is true, continue with 64 bit DMA and go to #6
4) If #2 is false, then free all the resources from #1
5) Set DMA mask to 32 and allocate RDPQs
6) Proceed with driver loading and other allocations
Link: https://lore.kernel.org/r/1587626596-1044-5-git-send-email-suganath-prabu.subramani@broadcom.com
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Suganath Prabu <suganath-prabu.subramani@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2020-04-23 10:23:15 +03:00
* @ use_32bit_dma : Flag to use 32 bit consistent dma mask
2012-11-30 06:14:21 +04:00
* @ scsi_io_cb_idx : shost generated commands
* @ tm_cb_idx : task management commands
* @ scsih_cb_idx : scsih internal commands
* @ transport_cb_idx : transport internal commands
* @ ctl_cb_idx : clt internal commands
* @ base_cb_idx : base internal commands
* @ config_cb_idx : base internal commands
* @ tm_tr_cb_idx : device removal target reset handshake
* @ tm_tr_volume_cb_idx : volume removal target reset
* @ base_cmds :
* @ transport_cmds :
* @ scsih_cmds :
* @ tm_cmds :
* @ ctl_cmds :
* @ config_cmds :
* @ base_add_sg_single : handler for either 32 / 64 bit sgl ' s
* @ event_type : bits indicating which events to log
* @ event_context : unique id for each logged event
* @ event_log : event log pointer
* @ event_masks : events that are masked
2019-12-26 14:13:25 +03:00
* @ max_shutdown_latency : timeout value for NVMe shutdown operation ,
* which is equal that NVMe drive ' s RTD3 Entry Latency
* which has reported maximum RTD3 Entry Latency value
* among attached NVMe drives .
2012-11-30 06:14:21 +04:00
* @ facts : static facts data
2019-08-03 16:59:48 +03:00
* @ prev_fw_facts : previous fw facts data
2012-11-30 06:14:21 +04:00
* @ pfacts : static port facts data
* @ manu_pg0 : static manufacturing page 0
* @ manu_pg10 : static manufacturing page 10
* @ manu_pg11 : static manufacturing page 11
* @ bios_pg2 : static bios page 2
* @ bios_pg3 : static bios page 3
* @ ioc_pg8 : static ioc page 8
* @ iounit_pg0 : static iounit page 0
* @ iounit_pg1 : static iounit page 1
2015-01-12 09:08:56 +03:00
* @ iounit_pg8 : static iounit page 8
2012-11-30 06:14:21 +04:00
* @ sas_hba : sas host object
* @ sas_expander_list : expander object list
2018-04-24 12:28:38 +03:00
* @ enclosure_list : enclosure object list
2012-11-30 06:14:21 +04:00
* @ sas_node_lock :
* @ sas_device_list : sas device object list
* @ sas_device_init_list : sas device object list ( used only at init time )
* @ sas_device_lock :
2017-10-31 15:32:27 +03:00
* @ pcie_device_list : pcie device object list
* @ pcie_device_init_list : pcie device object list ( used only at init time )
* @ pcie_device_lock :
2012-11-30 06:14:21 +04:00
* @ io_missing_delay : time for IO completed by fw when PDR enabled
* @ device_missing_delay : time for device missing by fw when PDR enabled
* @ sas_id : used for setting volume target IDs
2017-10-31 15:32:27 +03:00
* @ pcie_target_id : used for setting pcie target IDs
2012-11-30 06:14:21 +04:00
* @ blocking_handles : bitmask used to identify which devices need blocking
* @ pd_handles : bitmask for PD handles
* @ pd_handles_sz : size of pd_handle bitmask
* @ config_page_sz : config page size
* @ config_page : reserve memory for config page payload
* @ config_page_dma :
* @ hba_queue_depth : hba request queue depth
* @ sge_size : sg element size for either 32 / 64 bit
* @ scsiio_depth : SCSI_IO queue depth
* @ request_sz : per request frame size
* @ request : pool of request frames
* @ request_dma :
* @ request_dma_sz :
* @ scsi_lookup : firmware request tracker list
* @ scsi_lookup_lock :
* @ free_list : free list of request
* @ pending_io_count :
* @ reset_wq :
* @ chain : pool of chains
* @ chain_dma :
* @ max_sges_in_main_message : number sg elements in main message
* @ max_sges_in_chain_message : number sg elements per chain
* @ chains_needed_per_io : max chains per io
* @ chain_depth : total chains allocated
2016-01-28 09:37:04 +03:00
* @ chain_segment_sz : gives the max number of
* SGEs accommodate on single chain buffer
2012-11-30 06:14:21 +04:00
* @ hi_priority_smid :
* @ hi_priority :
* @ hi_priority_dma :
* @ hi_priority_depth :
* @ hpr_lookup :
* @ hpr_free_list :
* @ internal_smid :
* @ internal :
* @ internal_dma :
* @ internal_depth :
* @ internal_lookup :
* @ internal_free_list :
* @ sense : pool of sense
* @ sense_dma :
* @ sense_dma_pool :
* @ reply_depth : hba reply queue depth :
* @ reply_sz : per reply frame size :
* @ reply : pool of replies :
* @ reply_dma :
* @ reply_dma_pool :
* @ reply_free_queue_depth : reply free depth
* @ reply_free : pool for reply free queue ( 32 bit addr )
* @ reply_free_dma :
* @ reply_free_dma_pool :
* @ reply_free_host_index : tail index in pool to insert free replies
* @ reply_post_queue_depth : reply post queue depth
2014-09-12 14:05:31 +04:00
* @ reply_post_struct : struct for reply_post_free physical & virt address
* @ rdpq_array_capable : FW supports multiple reply queue addresses in ioc_init
* @ rdpq_array_enable : rdpq_array support is enabled in the driver
* @ rdpq_array_enable_assigned : this ensures that rdpq_array_enable flag
* is assigned only once
2012-11-30 06:14:21 +04:00
* @ reply_queue_count : number of reply queues
* @ reply_queue_list : linked list containing the reply queue info
2015-06-30 09:54:47 +03:00
* @ msix96_vector : 96 MSI - X vector support
* @ replyPostRegisterIndex : index of next position in Reply Desc Post Queue
2012-11-30 06:14:21 +04:00
* @ delayed_tr_list : target reset link list
* @ delayed_tr_volume_list : volume target reset link list
2016-01-28 09:37:02 +03:00
* @ delayed_sc_list :
* @ delayed_event_ack_list :
2015-11-11 15:00:33 +03:00
* @ temp_sensors_count : flag to carry the number of temperature sensors
* @ pci_access_mutex : Mutex to synchronize ioctl , sysfs show path and
* pci resource handling . PCI resource freeing will lead to free
* vital hardware / memory resource , which might be in use by cli / sysfs
* path functions resulting in Null pointer reference followed by kernel
* crash . To avoid the above race condition we use mutex synchronization
* which ensures synchronization between the cli / sysfs_show paths .
2019-05-31 15:14:35 +03:00
* @ atomic_desc_capable : Atomic Request Descriptor support .
2019-05-31 15:14:38 +03:00
* @ GET_MSIX_INDEX : Get the msix index of high iops queues .
2020-10-27 16:08:46 +03:00
* @ multipath_on_hba : flag to determine multipath on hba is enabled or not
2020-10-27 16:08:34 +03:00
* @ port_table_list : list containing HBA ' s wide / narrow port ' s info
2012-11-30 06:14:21 +04:00
*/
struct MPT3SAS_ADAPTER {
/* Per-controller ( IOC ) instance state ; per-field descriptions live in the
 * kernel-doc comment immediately preceding this definition . */
struct list_head list ;
struct Scsi_Host * shost ;
u8 id ;
int cpu_count ;
char name [ MPT_NAME_LENGTH ] ;
2017-07-14 15:06:55 +03:00
char driver_name [ MPT_NAME_LENGTH - 8 ] ;
2012-11-30 06:14:21 +04:00
char tmp_string [ MPT_STRING_LENGTH ] ;
struct pci_dev * pdev ;
Mpi2SystemInterfaceRegs_t __iomem * chip ;
2018-03-01 16:07:07 +03:00
phys_addr_t chip_phys ;
2012-11-30 06:14:21 +04:00
int logging_level ;
int fwfault_debug ;
u8 ir_firmware ;
int bars ;
u8 mask_interrupts ;
/* fw fault handler */
char fault_reset_work_q_name [ 20 ] ;
struct workqueue_struct * fault_reset_work_q ;
struct delayed_work fault_reset_work ;
/* fw event handler */
char firmware_event_name [ 20 ] ;
struct workqueue_struct * firmware_event_thread ;
spinlock_t fw_event_lock ;
struct list_head fw_event_list ;
2020-07-30 11:03:45 +03:00
struct fw_event_work * current_event ;
/* NOTE(review): presumably set while the fw event list is being flushed ,
 * to gate queuing of new events — confirm against mpt3sas_scsih . */
u8 fw_events_cleanup ;
2012-11-30 06:14:21 +04:00
/* misc flags */
int aen_event_read_flag ;
u8 broadcast_aen_busy ;
u16 broadcast_aen_pending ;
u8 shost_recovery ;
2017-01-23 12:56:08 +03:00
u8 got_task_abort_from_ioctl ;
2012-11-30 06:14:21 +04:00
struct mutex reset_in_progress_mutex ;
spinlock_t ioc_reset_in_progress_lock ;
u8 ioc_link_reset_in_progress ;
u8 ignore_loginfos ;
u8 remove_host ;
u8 pci_error_recovery ;
u8 wait_for_discovery_to_complete ;
u8 is_driver_loading ;
u8 port_enable_failed ;
u8 start_scan ;
u16 start_scan_failed ;
u8 msix_enable ;
u16 msix_vector_count ;
u8 * cpu_msix_table ;
u16 cpu_msix_table_sz ;
2015-11-11 15:00:28 +03:00
resource_size_t __iomem * * reply_post_host_index ;
2012-11-30 06:14:21 +04:00
u32 ioc_reset_count ;
MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds ;
2015-11-11 15:00:27 +03:00
u32 non_operational_loop ;
2019-12-26 14:13:28 +03:00
u8 ioc_coredump_loop ;
2020-11-26 12:43:04 +03:00
/* NOTE(review): appear to drive periodic host-to-IOC timestamp sync ;
 * the interval units are not visible in this header — confirm . */
u32 timestamp_update_count ;
u32 time_sync_interval ;
2019-02-15 10:40:28 +03:00
atomic64_t total_io_cnt ;
2019-05-31 15:14:38 +03:00
atomic64_t high_iops_outstanding ;
2019-02-15 10:40:28 +03:00
bool msix_load_balance ;
2019-02-15 10:40:29 +03:00
u16 thresh_hold ;
2019-05-31 15:14:36 +03:00
u8 high_iops_queues ;
2019-08-03 16:59:53 +03:00
u32 drv_support_bitmap ;
2021-03-05 13:28:58 +03:00
u32 dma_mask ;
2019-08-22 09:19:01 +03:00
bool enable_sdev_max_qd ;
scsi: mpt3sas: Handle RDPQ DMA allocation in same 4G region
For INVADER_SERIES, each set of 8 reply queues (0 - 7, 8 - 15,..), and for
VENTURA_SERIES, each set of 16 reply queues (0 - 15, 16 - 31,..) need to be
within the same 4 GB boundary. Driver uses limitation of VENTURA_SERIES to
manage INVADER_SERIES as well. The driver is allocating the DMA able
memory for RDPQs accordingly.
1) At driver load, set DMA mask to 64 and allocate memory for RDPQs
2) Check if allocated resources for RDPQ are in the same 4GB range
3) If #2 is true, continue with 64 bit DMA and go to #6
4) If #2 is false, then free all the resources from #1
5) Set DMA mask to 32 and allocate RDPQs
6) Proceed with driver loading and other allocations
Link: https://lore.kernel.org/r/1587626596-1044-5-git-send-email-suganath-prabu.subramani@broadcom.com
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Suganath Prabu <suganath-prabu.subramani@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2020-04-23 10:23:15 +03:00
bool use_32bit_dma ;
2012-11-30 06:14:21 +04:00
/* internal commands, callback index */
u8 scsi_io_cb_idx ;
u8 tm_cb_idx ;
u8 transport_cb_idx ;
u8 scsih_cb_idx ;
u8 ctl_cb_idx ;
u8 base_cb_idx ;
u8 port_enable_cb_idx ;
u8 config_cb_idx ;
u8 tm_tr_cb_idx ;
u8 tm_tr_volume_cb_idx ;
u8 tm_sas_control_cb_idx ;
struct _internal_cmd base_cmds ;
struct _internal_cmd port_enable_cmds ;
struct _internal_cmd transport_cmds ;
struct _internal_cmd scsih_cmds ;
struct _internal_cmd tm_cmds ;
struct _internal_cmd ctl_cmds ;
struct _internal_cmd config_cmds ;
MPT_ADD_SGE base_add_sg_single ;
/* function ptr for either IEEE or MPI sg elements */
MPT_BUILD_SG_SCMD build_sg_scmd ;
MPT_BUILD_SG build_sg ;
MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge ;
u16 sge_size_ieee ;
2015-11-11 15:00:22 +03:00
u16 hba_mpi_version_belonged ;
2012-11-30 06:14:21 +04:00
/* function ptr for MPI sg elements only */
MPT_BUILD_SG build_sg_mpi ;
MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi ;
2017-10-31 15:32:29 +03:00
/* function ptr for NVMe PRP elements only */
NVME_BUILD_PRP build_nvme_prp ;
2012-11-30 06:14:21 +04:00
/* event log */
u32 event_type [ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS ] ;
u32 event_context ;
void * event_log ;
u32 event_masks [ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS ] ;
2018-04-24 12:28:41 +03:00
u8 tm_custom_handling ;
u8 nvme_abort_timeout ;
2019-12-26 14:13:25 +03:00
u16 max_shutdown_latency ;
2018-04-24 12:28:41 +03:00
2012-11-30 06:14:21 +04:00
/* static config pages */
struct mpt3sas_facts facts ;
2019-08-03 16:59:48 +03:00
struct mpt3sas_facts prev_fw_facts ;
2012-11-30 06:14:21 +04:00
struct mpt3sas_port_facts * pfacts ;
Mpi2ManufacturingPage0_t manu_pg0 ;
struct Mpi2ManufacturingPage10_t manu_pg10 ;
struct Mpi2ManufacturingPage11_t manu_pg11 ;
Mpi2BiosPage2_t bios_pg2 ;
Mpi2BiosPage3_t bios_pg3 ;
Mpi2IOCPage8_t ioc_pg8 ;
Mpi2IOUnitPage0_t iounit_pg0 ;
Mpi2IOUnitPage1_t iounit_pg1 ;
2015-01-12 09:08:56 +03:00
Mpi2IOUnitPage8_t iounit_pg8 ;
2019-05-31 15:14:41 +03:00
Mpi2IOCPage1_t ioc_pg1_copy ;
2012-11-30 06:14:21 +04:00
struct _boot_device req_boot_device ;
struct _boot_device req_alt_boot_device ;
struct _boot_device current_boot_device ;
/* sas hba, expander, and device list */
struct _sas_node sas_hba ;
struct list_head sas_expander_list ;
2018-04-24 12:28:38 +03:00
struct list_head enclosure_list ;
2012-11-30 06:14:21 +04:00
spinlock_t sas_node_lock ;
struct list_head sas_device_list ;
struct list_head sas_device_init_list ;
spinlock_t sas_device_lock ;
2017-10-31 15:32:27 +03:00
struct list_head pcie_device_list ;
struct list_head pcie_device_init_list ;
spinlock_t pcie_device_lock ;
2012-11-30 06:14:21 +04:00
struct list_head raid_device_list ;
spinlock_t raid_device_lock ;
u8 io_missing_delay ;
u16 device_missing_delay ;
int sas_id ;
2017-10-31 15:32:27 +03:00
int pcie_target_id ;
2012-11-30 06:14:21 +04:00
void * blocking_handles ;
void * pd_handles ;
u16 pd_handles_sz ;
2016-10-26 11:04:34 +03:00
void * pend_os_device_add ;
u16 pend_os_device_add_sz ;
2012-11-30 06:14:21 +04:00
/* config page */
u16 config_page_sz ;
void * config_page ;
dma_addr_t config_page_dma ;
2018-02-07 13:51:48 +03:00
void * config_vaddr ;
2012-11-30 06:14:21 +04:00
/* scsiio request */
u16 hba_queue_depth ;
u16 sge_size ;
u16 scsiio_depth ;
u16 request_sz ;
u8 * request ;
dma_addr_t request_dma ;
u32 request_dma_sz ;
2018-01-04 15:57:11 +03:00
struct pcie_sg_list * pcie_sg_lookup ;
2012-11-30 06:14:21 +04:00
spinlock_t scsi_lookup_lock ;
int pending_io_count ;
wait_queue_head_t reset_wq ;
2021-02-02 12:58:32 +03:00
/* NOTE(review): assumed to be a per-smid lookup table of the queue number
 * an I / O was submitted on — confirm against the base driver . */
u16 * io_queue_num ;
2012-11-30 06:14:21 +04:00
2017-10-31 15:32:28 +03:00
/* PCIe SGL */
struct dma_pool * pcie_sgl_dma_pool ;
/* Host Page Size */
u32 page_size ;
2012-11-30 06:14:21 +04:00
/* chain */
2018-04-24 12:28:32 +03:00
struct chain_lookup * chain_lookup ;
2012-11-30 06:14:21 +04:00
struct list_head free_chain_list ;
struct dma_pool * chain_dma_pool ;
ulong chain_pages ;
u16 max_sges_in_main_message ;
u16 max_sges_in_chain_message ;
u16 chains_needed_per_io ;
u32 chain_depth ;
2016-01-28 09:37:04 +03:00
u16 chain_segment_sz ;
2018-01-04 15:57:11 +03:00
u16 chains_per_prp_buffer ;
2012-11-30 06:14:21 +04:00
/* hi-priority queue */
u16 hi_priority_smid ;
u8 * hi_priority ;
dma_addr_t hi_priority_dma ;
u16 hi_priority_depth ;
struct request_tracker * hpr_lookup ;
struct list_head hpr_free_list ;
/* internal queue */
u16 internal_smid ;
u8 * internal ;
dma_addr_t internal_dma ;
u16 internal_depth ;
struct request_tracker * internal_lookup ;
struct list_head internal_free_list ;
/* sense */
u8 * sense ;
dma_addr_t sense_dma ;
struct dma_pool * sense_dma_pool ;
/* reply */
u16 reply_sz ;
u8 * reply ;
dma_addr_t reply_dma ;
u32 reply_dma_max_address ;
u32 reply_dma_min_address ;
struct dma_pool * reply_dma_pool ;
/* reply free queue */
u16 reply_free_queue_depth ;
__le32 * reply_free ;
dma_addr_t reply_free_dma ;
struct dma_pool * reply_free_dma_pool ;
u32 reply_free_host_index ;
/* reply post queue */
u16 reply_post_queue_depth ;
2014-09-12 14:05:31 +04:00
struct reply_post_struct * reply_post ;
u8 rdpq_array_capable ;
u8 rdpq_array_enable ;
u8 rdpq_array_enable_assigned ;
2012-11-30 06:14:21 +04:00
struct dma_pool * reply_post_free_dma_pool ;
2018-04-24 12:28:31 +03:00
struct dma_pool * reply_post_free_array_dma_pool ;
Mpi2IOCInitRDPQArrayEntry * reply_post_free_array ;
dma_addr_t reply_post_free_array_dma ;
2012-11-30 06:14:21 +04:00
u8 reply_queue_count ;
struct list_head reply_queue_list ;
2016-10-26 11:04:38 +03:00
u8 combined_reply_queue ;
u8 combined_reply_index_count ;
2019-06-24 17:42:55 +03:00
u8 smp_affinity_enable ;
2015-06-30 09:54:47 +03:00
/* reply post register index */
resource_size_t * * replyPostRegisterIndex ;
2012-11-30 06:14:21 +04:00
struct list_head delayed_tr_list ;
struct list_head delayed_tr_volume_list ;
2016-01-28 09:37:02 +03:00
struct list_head delayed_sc_list ;
struct list_head delayed_event_ack_list ;
2015-01-12 09:08:56 +03:00
u8 temp_sensors_count ;
2015-11-11 15:00:33 +03:00
struct mutex pci_access_mutex ;
2012-11-30 06:14:21 +04:00
/* diag buffer support */
u8 * diag_buffer [ MPI2_DIAG_BUF_TYPE_COUNT ] ;
u32 diag_buffer_sz [ MPI2_DIAG_BUF_TYPE_COUNT ] ;
dma_addr_t diag_buffer_dma [ MPI2_DIAG_BUF_TYPE_COUNT ] ;
u8 diag_buffer_status [ MPI2_DIAG_BUF_TYPE_COUNT ] ;
u32 unique_id [ MPI2_DIAG_BUF_TYPE_COUNT ] ;
u32 product_specific [ MPI2_DIAG_BUF_TYPE_COUNT ] [ 23 ] ;
u32 diagnostic_flags [ MPI2_DIAG_BUF_TYPE_COUNT ] ;
u32 ring_buffer_offset ;
u32 ring_buffer_sz ;
2021-02-04 06:37:23 +03:00
struct htb_rel_query htb_rel ;
u8 reset_from_user ;
2015-11-11 15:00:28 +03:00
u8 is_warpdrive ;
2018-02-07 13:51:45 +03:00
u8 is_mcpu_endpoint ;
2015-11-11 15:00:28 +03:00
u8 hide_ir_msg ;
u8 mfg_pg10_hide_flag ;
u8 hide_drives ;
2012-11-30 06:14:21 +04:00
spinlock_t diag_trigger_lock ;
u8 diag_trigger_active ;
2019-05-31 15:14:35 +03:00
u8 atomic_desc_capable ;
2018-12-07 10:28:33 +03:00
BASE_READ_REG base_readl ;
2012-11-30 06:14:21 +04:00
struct SL_WH_MASTER_TRIGGER_T diag_trigger_master ;
struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event ;
struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi ;
struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi ;
2020-11-26 12:43:05 +03:00
u8 supports_trigger_pages ;
2016-10-26 11:04:34 +03:00
void * device_remove_in_progress ;
u16 device_remove_in_progress_sz ;
2016-10-26 11:04:37 +03:00
u8 is_gen35_ioc ;
2018-12-07 10:28:32 +03:00
u8 is_aero_ioc ;
2020-04-28 09:45:22 +03:00
struct dentry * debugfs_root ;
struct dentry * ioc_dump ;
2016-10-26 11:04:40 +03:00
/* function pointers selected per HBA generation for request submission
 * and MSI-X index selection */
PUT_SMID_IO_FP_HIP put_smid_scsi_io ;
2019-05-31 15:14:34 +03:00
PUT_SMID_IO_FP_HIP put_smid_fast_path ;
PUT_SMID_IO_FP_HIP put_smid_hi_priority ;
PUT_SMID_DEFAULT put_smid_default ;
2019-05-31 15:14:38 +03:00
GET_MSIX_INDEX get_msix_index_for_smlio ;
2020-10-27 16:08:34 +03:00
2020-10-27 16:08:46 +03:00
u8 multipath_on_hba ;
2020-10-27 16:08:34 +03:00
struct list_head port_table_list ;
2012-11-30 06:14:21 +04:00
} ;
2020-04-28 09:45:22 +03:00
/* Buffer exposed through debugfs (used for the ioc_dump file above). */
struct mpt3sas_debugfs_buffer {
void * buf ; /* kernel buffer holding the dump contents */
u32 len ; /* number of valid bytes in @buf */
} ;
2019-08-03 16:59:53 +03:00
# define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001
2021-02-04 06:37:23 +03:00
# define MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY 0x00000002
2019-08-03 16:59:53 +03:00
2012-11-30 06:14:21 +04:00
/* Completion callback type, registered per cb_idx via
 * mpt3sas_base_register_callback_handler() and invoked from interrupt
 * context with the smid, msix index, and reply descriptor address .
 * NOTE(review): the u8 return presumably tells the caller whether to
 * return the reply frame to the free queue — confirm in mpt3sas_base.c . */
typedef u8 ( * MPT_CALLBACK ) ( struct MPT3SAS_ADAPTER * ioc , u16 smid , u8 msix_index ,
u32 reply ) ;
/* base shared API */
extern struct list_head mpt3sas_ioc_list ;
2015-11-11 15:00:22 +03:00
extern char driver_name [ MPT_NAME_LENGTH ] ;
2015-11-11 15:00:33 +03:00
/* spinlock on list operations over IOCs
* Case : when multiple warpdrive cards ( IOCs ) are in use
* Each IOC will be added to the ioc list structure on initialization .
* Watchdog threads run at regular intervals to check each IOC for any
* fault conditions which will trigger the dead_ioc thread to
* deallocate pci resources , resulting in deletion of the IOC entry from the list ;
* this deletion needs to be protected by spinlock to ensure that
* ioc removal is synchronized ; if not synchronized it might lead to
* list_del corruption as the ioc list is traversed in the cli path .
*/
extern spinlock_t gioc_lock ;
2015-11-11 15:00:22 +03:00
2012-11-30 06:14:21 +04:00
void mpt3sas_base_start_watchdog ( struct MPT3SAS_ADAPTER * ioc ) ;
void mpt3sas_base_stop_watchdog ( struct MPT3SAS_ADAPTER * ioc ) ;
int mpt3sas_base_attach ( struct MPT3SAS_ADAPTER * ioc ) ;
void mpt3sas_base_detach ( struct MPT3SAS_ADAPTER * ioc ) ;
int mpt3sas_base_map_resources ( struct MPT3SAS_ADAPTER * ioc ) ;
void mpt3sas_base_free_resources ( struct MPT3SAS_ADAPTER * ioc ) ;
2018-04-24 12:28:38 +03:00
void mpt3sas_free_enclosure_list ( struct MPT3SAS_ADAPTER * ioc ) ;
2016-07-29 07:38:21 +03:00
int mpt3sas_base_hard_reset_handler ( struct MPT3SAS_ADAPTER * ioc ,
2012-11-30 06:14:21 +04:00
enum reset_type type ) ;
void * mpt3sas_base_get_msg_frame ( struct MPT3SAS_ADAPTER * ioc , u16 smid ) ;
void * mpt3sas_base_get_sense_buffer ( struct MPT3SAS_ADAPTER * ioc , u16 smid ) ;
__le32 mpt3sas_base_get_sense_buffer_dma ( struct MPT3SAS_ADAPTER * ioc ,
u16 smid ) ;
2017-10-31 15:32:28 +03:00
void * mpt3sas_base_get_pcie_sgl ( struct MPT3SAS_ADAPTER * ioc , u16 smid ) ;
2017-11-06 16:35:16 +03:00
dma_addr_t mpt3sas_base_get_pcie_sgl_dma ( struct MPT3SAS_ADAPTER * ioc , u16 smid ) ;
2020-07-30 11:03:48 +03:00
void mpt3sas_base_sync_reply_irqs ( struct MPT3SAS_ADAPTER * ioc , u8 poll ) ;
2020-07-30 11:03:46 +03:00
void mpt3sas_base_mask_interrupts ( struct MPT3SAS_ADAPTER * ioc ) ;
void mpt3sas_base_unmask_interrupts ( struct MPT3SAS_ADAPTER * ioc ) ;
2012-11-30 06:14:21 +04:00
2018-02-14 13:16:37 +03:00
void mpt3sas_base_put_smid_fast_path ( struct MPT3SAS_ADAPTER * ioc , u16 smid ,
u16 handle ) ;
void mpt3sas_base_put_smid_hi_priority ( struct MPT3SAS_ADAPTER * ioc , u16 smid ,
u16 msix_task ) ;
void mpt3sas_base_put_smid_nvme_encap ( struct MPT3SAS_ADAPTER * ioc , u16 smid ) ;
void mpt3sas_base_put_smid_default ( struct MPT3SAS_ADAPTER * ioc , u16 smid ) ;
2012-11-30 06:14:21 +04:00
/* hi-priority queue */
u16 mpt3sas_base_get_smid_hpr ( struct MPT3SAS_ADAPTER * ioc , u8 cb_idx ) ;
u16 mpt3sas_base_get_smid_scsiio ( struct MPT3SAS_ADAPTER * ioc , u8 cb_idx ,
2018-01-04 15:57:11 +03:00
struct scsi_cmnd * scmd ) ;
void mpt3sas_base_clear_st ( struct MPT3SAS_ADAPTER * ioc ,
struct scsiio_tracker * st ) ;
2012-11-30 06:14:21 +04:00
u16 mpt3sas_base_get_smid ( struct MPT3SAS_ADAPTER * ioc , u8 cb_idx ) ;
void mpt3sas_base_free_smid ( struct MPT3SAS_ADAPTER * ioc , u16 smid ) ;
void mpt3sas_base_initialize_callback_handler ( void ) ;
u8 mpt3sas_base_register_callback_handler ( MPT_CALLBACK cb_func ) ;
void mpt3sas_base_release_callback_handler ( u8 cb_idx ) ;
u8 mpt3sas_base_done ( struct MPT3SAS_ADAPTER * ioc , u16 smid , u8 msix_index ,
u32 reply ) ;
u8 mpt3sas_port_enable_done ( struct MPT3SAS_ADAPTER * ioc , u16 smid ,
u8 msix_index , u32 reply ) ;
void * mpt3sas_base_get_reply_virt_addr ( struct MPT3SAS_ADAPTER * ioc ,
u32 phys_addr ) ;
u32 mpt3sas_base_get_iocstate ( struct MPT3SAS_ADAPTER * ioc , int cooked ) ;
void mpt3sas_base_fault_info ( struct MPT3SAS_ADAPTER * ioc , u16 fault_code ) ;
2019-12-26 14:13:29 +03:00
/* Log the calling function's name, then dump the fault register details
 * via mpt3sas_base_fault_info(). do/while(0) keeps the multi-statement
 * macro safe inside un-braced if/else bodies. */
# define mpt3sas_print_fault_code(ioc, fault_code) \
do { pr_err ( " %s fault info from func: %s \n " , ioc - > name , __func__ ) ; \
mpt3sas_base_fault_info ( ioc , fault_code ) ; } while ( 0 )
2019-12-26 14:13:27 +03:00
void mpt3sas_base_coredump_info ( struct MPT3SAS_ADAPTER * ioc , u16 fault_code ) ;
2019-12-26 14:13:29 +03:00
/* Same pattern as mpt3sas_print_fault_code, but for the IOC CoreDump
 * state: log the caller, then dump details via mpt3sas_base_coredump_info(). */
# define mpt3sas_print_coredump_info(ioc, fault_code) \
do { pr_err ( " %s fault info from func: %s \n " , ioc - > name , __func__ ) ; \
mpt3sas_base_coredump_info ( ioc , fault_code ) ; } while ( 0 )
2019-12-26 14:13:27 +03:00
int mpt3sas_base_wait_for_coredump_completion ( struct MPT3SAS_ADAPTER * ioc ,
const char * caller ) ;
2012-11-30 06:14:21 +04:00
int mpt3sas_base_sas_iounit_control ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2SasIoUnitControlReply_t * mpi_reply ,
Mpi2SasIoUnitControlRequest_t * mpi_request ) ;
int mpt3sas_base_scsi_enclosure_processor ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2SepReply_t * mpi_reply , Mpi2SepRequest_t * mpi_request ) ;
void mpt3sas_base_validate_event_type ( struct MPT3SAS_ADAPTER * ioc ,
u32 * event_type ) ;
void mpt3sas_halt_firmware ( struct MPT3SAS_ADAPTER * ioc ) ;
void mpt3sas_base_update_missing_delay ( struct MPT3SAS_ADAPTER * ioc ,
u16 device_missing_delay , u8 io_missing_delay ) ;
int mpt3sas_port_enable ( struct MPT3SAS_ADAPTER * ioc ) ;
2018-02-17 01:39:58 +03:00
void
mpt3sas_wait_for_commands_to_complete ( struct MPT3SAS_ADAPTER * ioc ) ;
2018-05-31 13:34:50 +03:00
u8 mpt3sas_base_check_cmd_timeout ( struct MPT3SAS_ADAPTER * ioc ,
u8 status , void * mpi_request , int sz ) ;
2019-12-26 14:13:31 +03:00
/* Log the caller and evaluate an internal-command timeout: assigns the
 * result of mpt3sas_base_check_cmd_timeout() (non-zero when a host reset
 * should be issued) to the caller-supplied issue_reset lvalue. */
# define mpt3sas_check_cmd_timeout(ioc, status, mpi_request, sz, issue_reset) \
do { ioc_err ( ioc , " In func: %s \n " , __func__ ) ; \
issue_reset = mpt3sas_base_check_cmd_timeout ( ioc , \
status , mpi_request , sz ) ; } while ( 0 )
2018-10-31 16:23:33 +03:00
int mpt3sas_wait_for_ioc ( struct MPT3SAS_ADAPTER * ioc , int wait_count ) ;
2012-11-30 06:14:21 +04:00
/* scsih shared API */
2018-01-04 15:57:11 +03:00
struct scsi_cmnd * mpt3sas_scsih_scsi_lookup_get ( struct MPT3SAS_ADAPTER * ioc ,
u16 smid ) ;
2012-11-30 06:14:21 +04:00
u8 mpt3sas_scsih_event_callback ( struct MPT3SAS_ADAPTER * ioc , u8 msix_index ,
u32 reply ) ;
2018-06-16 00:42:00 +03:00
void mpt3sas_scsih_pre_reset_handler ( struct MPT3SAS_ADAPTER * ioc ) ;
2019-12-26 14:13:26 +03:00
void mpt3sas_scsih_clear_outstanding_scsi_tm_commands (
struct MPT3SAS_ADAPTER * ioc ) ;
2018-06-16 00:42:00 +03:00
void mpt3sas_scsih_reset_done_handler ( struct MPT3SAS_ADAPTER * ioc ) ;
2012-11-30 06:14:21 +04:00
2020-07-30 11:03:47 +03:00
int mpt3sas_scsih_issue_tm ( struct MPT3SAS_ADAPTER * ioc , u16 handle ,
uint channel , uint id , u64 lun , u8 type , u16 smid_task ,
u16 msix_task , u8 timeout , u8 tr_method ) ;
2016-07-29 07:38:20 +03:00
int mpt3sas_scsih_issue_locked_tm ( struct MPT3SAS_ADAPTER * ioc , u16 handle ,
2020-07-30 11:03:47 +03:00
uint channel , uint id , u64 lun , u8 type , u16 smid_task ,
u16 msix_task , u8 timeout , u8 tr_method ) ;
2016-07-29 07:38:20 +03:00
2012-11-30 06:14:21 +04:00
void mpt3sas_scsih_set_tm_flag ( struct MPT3SAS_ADAPTER * ioc , u16 handle ) ;
void mpt3sas_scsih_clear_tm_flag ( struct MPT3SAS_ADAPTER * ioc , u16 handle ) ;
2020-10-27 16:08:38 +03:00
void mpt3sas_expander_remove ( struct MPT3SAS_ADAPTER * ioc , u64 sas_address ,
struct hba_port * port ) ;
2012-11-30 06:14:21 +04:00
void mpt3sas_device_remove_by_sas_address ( struct MPT3SAS_ADAPTER * ioc ,
2020-10-27 16:08:38 +03:00
u64 sas_address , struct hba_port * port ) ;
2016-01-28 09:37:02 +03:00
u8 mpt3sas_check_for_pending_internal_cmds ( struct MPT3SAS_ADAPTER * ioc ,
u16 smid ) ;
2020-10-27 16:08:38 +03:00
struct hba_port *
2020-10-27 16:08:44 +03:00
mpt3sas_get_port_by_id ( struct MPT3SAS_ADAPTER * ioc , u8 port ,
u8 bypass_dirty_port_flag ) ;
2012-11-30 06:14:21 +04:00
struct _sas_node * mpt3sas_scsih_expander_find_by_handle (
struct MPT3SAS_ADAPTER * ioc , u16 handle ) ;
struct _sas_node * mpt3sas_scsih_expander_find_by_sas_address (
2020-10-27 16:08:38 +03:00
struct MPT3SAS_ADAPTER * ioc , u64 sas_address ,
struct hba_port * port ) ;
2015-11-11 15:00:30 +03:00
struct _sas_device * mpt3sas_get_sdev_by_addr (
2020-10-27 16:08:38 +03:00
struct MPT3SAS_ADAPTER * ioc , u64 sas_address ,
struct hba_port * port ) ;
2015-11-11 15:00:30 +03:00
struct _sas_device * __mpt3sas_get_sdev_by_addr (
2020-10-27 16:08:38 +03:00
struct MPT3SAS_ADAPTER * ioc , u64 sas_address ,
struct hba_port * port ) ;
2017-10-31 15:32:30 +03:00
struct _sas_device * mpt3sas_get_sdev_by_handle ( struct MPT3SAS_ADAPTER * ioc ,
u16 handle ) ;
struct _pcie_device * mpt3sas_get_pdev_by_handle ( struct MPT3SAS_ADAPTER * ioc ,
u16 handle ) ;
2012-11-30 06:14:21 +04:00
void mpt3sas_port_enable_complete ( struct MPT3SAS_ADAPTER * ioc ) ;
2015-11-11 15:00:35 +03:00
struct _raid_device *
mpt3sas_raid_device_find_by_handle ( struct MPT3SAS_ADAPTER * ioc , u16 handle ) ;
2019-08-22 09:19:01 +03:00
void mpt3sas_scsih_change_queue_depth ( struct scsi_device * sdev , int qdepth ) ;
2020-10-27 16:08:40 +03:00
struct _sas_device *
__mpt3sas_get_sdev_by_rphy ( struct MPT3SAS_ADAPTER * ioc , struct sas_rphy * rphy ) ;
2020-10-27 16:08:43 +03:00
struct virtual_phy *
mpt3sas_get_vphy_by_phy ( struct MPT3SAS_ADAPTER * ioc ,
struct hba_port * port , u32 phy ) ;
2015-11-11 15:00:18 +03:00
2012-11-30 06:14:21 +04:00
/* config shared API */
u8 mpt3sas_config_done ( struct MPT3SAS_ADAPTER * ioc , u16 smid , u8 msix_index ,
u32 reply ) ;
int mpt3sas_config_get_number_hba_phys ( struct MPT3SAS_ADAPTER * ioc ,
u8 * num_phys ) ;
int mpt3sas_config_get_manufacturing_pg0 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi2ManufacturingPage0_t * config_page ) ;
int mpt3sas_config_get_manufacturing_pg7 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi2ManufacturingPage7_t * config_page ,
u16 sz ) ;
int mpt3sas_config_get_manufacturing_pg10 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply ,
struct Mpi2ManufacturingPage10_t * config_page ) ;
int mpt3sas_config_get_manufacturing_pg11 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply ,
struct Mpi2ManufacturingPage11_t * config_page ) ;
int mpt3sas_config_set_manufacturing_pg11 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply ,
struct Mpi2ManufacturingPage11_t * config_page ) ;
int mpt3sas_config_get_bios_pg2 ( struct MPT3SAS_ADAPTER * ioc , Mpi2ConfigReply_t
* mpi_reply , Mpi2BiosPage2_t * config_page ) ;
int mpt3sas_config_get_bios_pg3 ( struct MPT3SAS_ADAPTER * ioc , Mpi2ConfigReply_t
* mpi_reply , Mpi2BiosPage3_t * config_page ) ;
int mpt3sas_config_get_iounit_pg0 ( struct MPT3SAS_ADAPTER * ioc , Mpi2ConfigReply_t
* mpi_reply , Mpi2IOUnitPage0_t * config_page ) ;
int mpt3sas_config_get_sas_device_pg0 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi2SasDevicePage0_t * config_page ,
u32 form , u32 handle ) ;
int mpt3sas_config_get_sas_device_pg1 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi2SasDevicePage1_t * config_page ,
u32 form , u32 handle ) ;
2017-10-31 15:32:30 +03:00
int mpt3sas_config_get_pcie_device_pg0 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi26PCIeDevicePage0_t * config_page ,
u32 form , u32 handle ) ;
int mpt3sas_config_get_pcie_device_pg2 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi26PCIeDevicePage2_t * config_page ,
u32 form , u32 handle ) ;
2012-11-30 06:14:21 +04:00
int mpt3sas_config_get_sas_iounit_pg0 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi2SasIOUnitPage0_t * config_page ,
u16 sz ) ;
int mpt3sas_config_get_iounit_pg1 ( struct MPT3SAS_ADAPTER * ioc , Mpi2ConfigReply_t
* mpi_reply , Mpi2IOUnitPage1_t * config_page ) ;
2015-11-11 15:00:29 +03:00
int mpt3sas_config_get_iounit_pg3 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi2IOUnitPage3_t * config_page , u16 sz ) ;
2012-11-30 06:14:21 +04:00
int mpt3sas_config_set_iounit_pg1 ( struct MPT3SAS_ADAPTER * ioc , Mpi2ConfigReply_t
* mpi_reply , Mpi2IOUnitPage1_t * config_page ) ;
2015-01-12 09:08:56 +03:00
int mpt3sas_config_get_iounit_pg8 ( struct MPT3SAS_ADAPTER * ioc , Mpi2ConfigReply_t
* mpi_reply , Mpi2IOUnitPage8_t * config_page ) ;
2012-11-30 06:14:21 +04:00
int mpt3sas_config_get_sas_iounit_pg1 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi2SasIOUnitPage1_t * config_page ,
u16 sz ) ;
int mpt3sas_config_set_sas_iounit_pg1 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi2SasIOUnitPage1_t * config_page ,
u16 sz ) ;
2019-05-31 15:14:41 +03:00
int mpt3sas_config_get_ioc_pg1 ( struct MPT3SAS_ADAPTER * ioc , Mpi2ConfigReply_t
* mpi_reply , Mpi2IOCPage1_t * config_page ) ;
int mpt3sas_config_set_ioc_pg1 ( struct MPT3SAS_ADAPTER * ioc , Mpi2ConfigReply_t
* mpi_reply , Mpi2IOCPage1_t * config_page ) ;
2012-11-30 06:14:21 +04:00
int mpt3sas_config_get_ioc_pg8 ( struct MPT3SAS_ADAPTER * ioc , Mpi2ConfigReply_t
* mpi_reply , Mpi2IOCPage8_t * config_page ) ;
int mpt3sas_config_get_expander_pg0 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi2ExpanderPage0_t * config_page ,
u32 form , u32 handle ) ;
int mpt3sas_config_get_expander_pg1 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi2ExpanderPage1_t * config_page ,
u32 phy_number , u16 handle ) ;
int mpt3sas_config_get_enclosure_pg0 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi2SasEnclosurePage0_t * config_page ,
u32 form , u32 handle ) ;
int mpt3sas_config_get_phy_pg0 ( struct MPT3SAS_ADAPTER * ioc , Mpi2ConfigReply_t
* mpi_reply , Mpi2SasPhyPage0_t * config_page , u32 phy_number ) ;
int mpt3sas_config_get_phy_pg1 ( struct MPT3SAS_ADAPTER * ioc , Mpi2ConfigReply_t
* mpi_reply , Mpi2SasPhyPage1_t * config_page , u32 phy_number ) ;
int mpt3sas_config_get_raid_volume_pg1 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi2RaidVolPage1_t * config_page , u32 form ,
u32 handle ) ;
int mpt3sas_config_get_number_pds ( struct MPT3SAS_ADAPTER * ioc , u16 handle ,
u8 * num_pds ) ;
int mpt3sas_config_get_raid_volume_pg0 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi2RaidVolPage0_t * config_page , u32 form ,
u32 handle , u16 sz ) ;
int mpt3sas_config_get_phys_disk_pg0 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi2RaidPhysDiskPage0_t * config_page ,
u32 form , u32 form_specific ) ;
int mpt3sas_config_get_volume_handle ( struct MPT3SAS_ADAPTER * ioc , u16 pd_handle ,
u16 * volume_handle ) ;
int mpt3sas_config_get_volume_wwid ( struct MPT3SAS_ADAPTER * ioc ,
u16 volume_handle , u64 * wwid ) ;
2020-11-26 12:43:05 +03:00
int
mpt3sas_config_get_driver_trigger_pg0 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi26DriverTriggerPage0_t * config_page ) ;
2020-11-26 12:43:06 +03:00
int
mpt3sas_config_get_driver_trigger_pg1 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi26DriverTriggerPage1_t * config_page ) ;
int
2020-11-26 12:43:07 +03:00
mpt3sas_config_get_driver_trigger_pg2 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi26DriverTriggerPage2_t * config_page ) ;
int
2020-11-26 12:43:08 +03:00
mpt3sas_config_get_driver_trigger_pg3 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi26DriverTriggerPage3_t * config_page ) ;
int
2020-11-26 12:43:09 +03:00
mpt3sas_config_get_driver_trigger_pg4 ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2ConfigReply_t * mpi_reply , Mpi26DriverTriggerPage4_t * config_page ) ;
int
2020-11-26 12:43:06 +03:00
mpt3sas_config_update_driver_trigger_pg1 ( struct MPT3SAS_ADAPTER * ioc ,
struct SL_WH_MASTER_TRIGGER_T * master_tg , bool set ) ;
2020-11-26 12:43:07 +03:00
int
mpt3sas_config_update_driver_trigger_pg2 ( struct MPT3SAS_ADAPTER * ioc ,
struct SL_WH_EVENT_TRIGGERS_T * event_tg , bool set ) ;
2020-11-26 12:43:08 +03:00
int
mpt3sas_config_update_driver_trigger_pg3 ( struct MPT3SAS_ADAPTER * ioc ,
struct SL_WH_SCSI_TRIGGERS_T * scsi_tg , bool set ) ;
2020-11-26 12:43:09 +03:00
int
mpt3sas_config_update_driver_trigger_pg4 ( struct MPT3SAS_ADAPTER * ioc ,
struct SL_WH_MPI_TRIGGERS_T * mpi_tg , bool set ) ;
2012-11-30 06:14:21 +04:00
/* ctl shared API */
extern struct device_attribute * mpt3sas_host_attrs [ ] ;
extern struct device_attribute * mpt3sas_dev_attrs [ ] ;
2015-11-11 15:00:35 +03:00
void mpt3sas_ctl_init ( ushort hbas_to_enumerate ) ;
void mpt3sas_ctl_exit ( ushort hbas_to_enumerate ) ;
2012-11-30 06:14:21 +04:00
u8 mpt3sas_ctl_done ( struct MPT3SAS_ADAPTER * ioc , u16 smid , u8 msix_index ,
u32 reply ) ;
2018-06-16 00:42:00 +03:00
void mpt3sas_ctl_pre_reset_handler ( struct MPT3SAS_ADAPTER * ioc ) ;
2019-12-26 14:13:26 +03:00
void mpt3sas_ctl_clear_outstanding_ioctls ( struct MPT3SAS_ADAPTER * ioc ) ;
2018-06-16 00:42:00 +03:00
void mpt3sas_ctl_reset_done_handler ( struct MPT3SAS_ADAPTER * ioc ) ;
2012-11-30 06:14:21 +04:00
u8 mpt3sas_ctl_event_callback ( struct MPT3SAS_ADAPTER * ioc ,
u8 msix_index , u32 reply ) ;
void mpt3sas_ctl_add_to_event_log ( struct MPT3SAS_ADAPTER * ioc ,
Mpi2EventNotificationReply_t * mpi_reply ) ;
void mpt3sas_enable_diag_buffer ( struct MPT3SAS_ADAPTER * ioc ,
2017-05-09 01:57:50 +03:00
u8 bits_to_register ) ;
2012-11-30 06:14:21 +04:00
int mpt3sas_send_diag_release ( struct MPT3SAS_ADAPTER * ioc , u8 buffer_type ,
u8 * issue_reset ) ;
/* transport shared API */
2015-11-11 15:00:19 +03:00
extern struct scsi_transport_template * mpt3sas_transport_template ;
2012-11-30 06:14:21 +04:00
u8 mpt3sas_transport_done ( struct MPT3SAS_ADAPTER * ioc , u16 smid , u8 msix_index ,
u32 reply ) ;
struct _sas_port * mpt3sas_transport_port_add ( struct MPT3SAS_ADAPTER * ioc ,
2020-10-27 16:08:37 +03:00
u16 handle , u64 sas_address , struct hba_port * port ) ;
2012-11-30 06:14:21 +04:00
void mpt3sas_transport_port_remove ( struct MPT3SAS_ADAPTER * ioc , u64 sas_address ,
2020-10-27 16:08:37 +03:00
u64 sas_address_parent , struct hba_port * port ) ;
2012-11-30 06:14:21 +04:00
int mpt3sas_transport_add_host_phy ( struct MPT3SAS_ADAPTER * ioc , struct _sas_phy
* mpt3sas_phy , Mpi2SasPhyPage0_t phy_pg0 , struct device * parent_dev ) ;
int mpt3sas_transport_add_expander_phy ( struct MPT3SAS_ADAPTER * ioc ,
struct _sas_phy * mpt3sas_phy , Mpi2ExpanderPage1_t expander_pg1 ,
struct device * parent_dev ) ;
void mpt3sas_transport_update_links ( struct MPT3SAS_ADAPTER * ioc ,
2020-10-27 16:08:37 +03:00
u64 sas_address , u16 handle , u8 phy_number , u8 link_rate ,
struct hba_port * port ) ;
2012-11-30 06:14:21 +04:00
extern struct sas_function_template mpt3sas_transport_functions ;
extern struct scsi_transport_template * mpt3sas_transport_template ;
2020-10-27 16:08:39 +03:00
void
mpt3sas_transport_del_phy_from_an_existing_port ( struct MPT3SAS_ADAPTER * ioc ,
struct _sas_node * sas_node , struct _sas_phy * mpt3sas_phy ) ;
void
mpt3sas_transport_add_phy_to_an_existing_port ( struct MPT3SAS_ADAPTER * ioc ,
struct _sas_node * sas_node , struct _sas_phy * mpt3sas_phy ,
u64 sas_address , struct hba_port * port ) ;
2012-11-30 06:14:21 +04:00
/* trigger data externs */
void mpt3sas_send_trigger_data_event ( struct MPT3SAS_ADAPTER * ioc ,
struct SL_WH_TRIGGERS_EVENT_DATA_T * event_data ) ;
void mpt3sas_process_trigger_data ( struct MPT3SAS_ADAPTER * ioc ,
struct SL_WH_TRIGGERS_EVENT_DATA_T * event_data ) ;
void mpt3sas_trigger_master ( struct MPT3SAS_ADAPTER * ioc ,
2020-06-09 19:13:13 +03:00
u32 trigger_bitmask ) ;
2012-11-30 06:14:21 +04:00
void mpt3sas_trigger_event ( struct MPT3SAS_ADAPTER * ioc , u16 event ,
u16 log_entry_qualifier ) ;
void mpt3sas_trigger_scsi ( struct MPT3SAS_ADAPTER * ioc , u8 sense_key ,
u8 asc , u8 ascq ) ;
void mpt3sas_trigger_mpi ( struct MPT3SAS_ADAPTER * ioc , u16 ioc_status ,
u32 loginfo ) ;
2015-11-11 15:00:35 +03:00
/* warpdrive APIs */
u8 mpt3sas_get_num_volumes ( struct MPT3SAS_ADAPTER * ioc ) ;
void mpt3sas_init_warpdrive_properties ( struct MPT3SAS_ADAPTER * ioc ,
struct _raid_device * raid_device ) ;
void
mpt3sas_setup_direct_io ( struct MPT3SAS_ADAPTER * ioc , struct scsi_cmnd * scmd ,
2018-01-04 15:57:11 +03:00
struct _raid_device * raid_device , Mpi25SCSIIORequest_t * mpi_request ) ;
2015-11-11 15:00:35 +03:00
2016-12-13 03:31:40 +03:00
/* NCQ Prio Handling Check */
bool scsih_ncq_prio_supp ( struct scsi_device * sdev ) ;
2020-04-28 09:45:22 +03:00
void mpt3sas_setup_debugfs ( struct MPT3SAS_ADAPTER * ioc ) ;
void mpt3sas_destroy_debugfs ( struct MPT3SAS_ADAPTER * ioc ) ;
void mpt3sas_init_debugfs ( void ) ;
void mpt3sas_exit_debugfs ( void ) ;
2019-08-03 16:59:50 +03:00
/**
 * mpt3sas_scsih_is_pcie_scsi_device - determines if device is a pcie scsi device
 * @device_info: bitfield providing information about the device
 * Context: none
 *
 * Return: 1 if the PCIe device type field indicates a SCSI device,
 * 0 otherwise.
 */
static inline int
mpt3sas_scsih_is_pcie_scsi_device(u32 device_info)
{
	/* Mask off everything but the device-type bits before comparing. */
	return ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) ==
	    MPI26_PCIE_DEVINFO_SCSI) ? 1 : 0;
}
2012-11-30 06:14:21 +04:00
# endif /* MPT3SAS_BASE_H_INCLUDED */