/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2009-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#define LPFC_ACTIVE_MBOX_WAIT_CNT               100
#define LPFC_XRI_EXCH_BUSY_WAIT_TMO             10000
#define LPFC_XRI_EXCH_BUSY_WAIT_T1              10
#define LPFC_XRI_EXCH_BUSY_WAIT_T2              30000
#define LPFC_RELEASE_NOTIFICATION_INTERVAL      32
#define LPFC_RPI_LOW_WATER_MARK                 10

#define LPFC_UNREG_FCF                          1
#define LPFC_SKIP_UNREG_FCF                     0

/* Amount of time in milliseconds to wait for FCF rediscovery to complete */
#define LPFC_FCF_REDISCOVER_WAIT_TMO            2000    /* msec */

/* Number of SGL entries that can be posted in a 4KB nonembedded mbox command */
#define LPFC_NEMBED_MBOX_SGL_CNT                254

/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
#define LPFC_HBA_IO_CHAN_MIN    0
#define LPFC_HBA_IO_CHAN_MAX    32
#define LPFC_FCP_IO_CHAN_DEF    4
#define LPFC_NVME_IO_CHAN_DEF   0

/* Number of channels used for Flash Optimized Fabric (FOF) operations */
#define LPFC_FOF_IO_CHAN_NUM    1

/*
 * Provide the default FCF Record attributes used by the driver
 * when non-FIP mode is configured and there are no other default
 * FCF Record attributes.
 */
#define LPFC_FCOE_FCF_DEF_INDEX 0
#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF

#define LPFC_FCOE_NULL_VID      0xFFF
#define LPFC_FCOE_IGNORE_VID    0xFFFF

/* The first 3 bytes of the default FCF MAC are specified by FC_MAP */
#define LPFC_FCOE_FCF_MAC3      0xFF
#define LPFC_FCOE_FCF_MAC4      0xFF
#define LPFC_FCOE_FCF_MAC5      0xFE
#define LPFC_FCOE_FCF_MAP0      0x0E
#define LPFC_FCOE_FCF_MAP1      0xFC
#define LPFC_FCOE_FCF_MAP2      0x00
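
/*
 * Worked illustration (not a driver declaration): the FC_MAP bytes above
 * supply the first three bytes of the default FCF MAC address and the
 * MAC3-MAC5 bytes supply the last three, so the default FCF MAC implied by
 * these values is 0E:FC:00:FF:FF:FE.  The array name below is purely
 * illustrative:
 *
 *	uint8_t def_fcf_mac[6] = {
 *		LPFC_FCOE_FCF_MAP0, LPFC_FCOE_FCF_MAP1, LPFC_FCOE_FCF_MAP2,
 *		LPFC_FCOE_FCF_MAC3, LPFC_FCOE_FCF_MAC4, LPFC_FCOE_FCF_MAC5,
 *	};
 */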

#define LPFC_FCOE_MAX_RCV_SIZE  0x800

#define LPFC_FCOE_FKA_ADV_PER   0
#define LPFC_FCOE_FIP_PRIORITY  0x80

#define sli4_sid_from_fc_hdr(fc_hdr)  \
	((fc_hdr)->fh_s_id[0] << 16 | \
	 (fc_hdr)->fh_s_id[1] <<  8 | \
	 (fc_hdr)->fh_s_id[2])

#define sli4_did_from_fc_hdr(fc_hdr)  \
	((fc_hdr)->fh_d_id[0] << 16 | \
	 (fc_hdr)->fh_d_id[1] <<  8 | \
	 (fc_hdr)->fh_d_id[2])

#define sli4_fctl_from_fc_hdr(fc_hdr) \
	((fc_hdr)->fh_f_ctl[0] << 16 | \
	 (fc_hdr)->fh_f_ctl[1] <<  8 | \
	 (fc_hdr)->fh_f_ctl[2])

#define sli4_type_from_fc_hdr(fc_hdr) \
	((fc_hdr)->fh_type)
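
/*
 * Usage sketch (illustrative only): these macros rebuild the 24-bit S_ID,
 * D_ID and F_CTL words from the three bytes carried in a received FC frame
 * header, assuming "fc_hdr" points at a struct fc_frame_header taken from a
 * receive buffer:
 *
 *	uint32_t sid  = sli4_sid_from_fc_hdr(fc_hdr);
 *	uint32_t did  = sli4_did_from_fc_hdr(fc_hdr);
 *	uint32_t fctl = sli4_fctl_from_fc_hdr(fc_hdr);
 */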

#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT     12000

#define INT_FW_UPGRADE  0
#define RUN_FW_UPGRADE  1

enum lpfc_sli4_queue_type {
	LPFC_EQ,
	LPFC_GCQ,
	LPFC_MCQ,
	LPFC_WCQ,
	LPFC_RCQ,
	LPFC_MQ,
	LPFC_WQ,
	LPFC_HRQ,
	LPFC_DRQ
};

/* The queue sub-type defines the functional purpose of the queue */
enum lpfc_sli4_queue_subtype {
	LPFC_NONE,
	LPFC_MBOX,
	LPFC_FCP,
	LPFC_ELS,
	LPFC_NVME,
	LPFC_NVMET,
	LPFC_NVME_LS,
	LPFC_USOL
};

union sli4_qe {
	void *address;
	struct lpfc_eqe *eqe;
	struct lpfc_cqe *cqe;
	struct lpfc_mcqe *mcqe;
	struct lpfc_wcqe_complete *wcqe_complete;
	struct lpfc_wcqe_release *wcqe_release;
	struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
	struct lpfc_rcqe_complete *rcqe_complete;
	struct lpfc_mqe *mqe;
	union lpfc_wqe *wqe;
	union lpfc_wqe128 *wqe128;
	struct lpfc_rqe *rqe;
};

/* RQ buffer list */
struct lpfc_rqb {
	uint16_t entry_count;	/* Current number of RQ slots */
	uint16_t buffer_count;	/* Current number of buffers posted */
	struct list_head rqb_buffer_list; /* buffers assigned to this HBQ */
	/* Callback for HBQ buffer allocation */
	struct rqb_dmabuf *(*rqb_alloc_buffer)(struct lpfc_hba *);
	/* Callback for HBQ buffer free */
	void (*rqb_free_buffer)(struct lpfc_hba *,
				struct rqb_dmabuf *);
};

struct lpfc_queue {
	struct list_head list;
	struct list_head wq_list;
	enum lpfc_sli4_queue_type type;
	enum lpfc_sli4_queue_subtype subtype;
	struct lpfc_hba *phba;
	struct list_head child_list;
	struct list_head page_list;
	struct list_head sgl_list;
	uint32_t entry_count;	/* Number of entries to support on the queue */
	uint32_t entry_size;	/* Size of each queue entry. */
	uint32_t entry_repost;	/* Count of entries before doorbell is rung */
#define LPFC_QUEUE_MIN_REPOST	8
	uint32_t queue_id;	/* Queue ID assigned by the hardware */
	uint32_t assoc_qid;	/* Queue ID associated with, for CQ/WQ/MQ */
	uint32_t page_count;	/* Number of pages allocated for this queue */
	uint32_t host_index;	/* The host's index for putting or getting */
	uint32_t hba_index;	/* The last known hba index for get or put */

	struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
	struct lpfc_rqb *rqbp;	/* ptr to RQ buffers */

	uint16_t sgl_list_cnt;
	uint16_t db_format;
#define LPFC_DB_RING_FORMAT	0x01
#define LPFC_DB_LIST_FORMAT	0x02
	void __iomem *db_regaddr;

	/* For q stats */
	uint32_t q_cnt_1;
	uint32_t q_cnt_2;
	uint32_t q_cnt_3;
	uint64_t q_cnt_4;
/* defines for EQ stats */
#define	EQ_max_eqe		q_cnt_1
#define	EQ_no_entry		q_cnt_2
#define	EQ_badstate		q_cnt_3
#define	EQ_processed		q_cnt_4
/* defines for CQ stats */
#define	CQ_mbox			q_cnt_1
#define	CQ_max_cqe		q_cnt_1
#define	CQ_release_wqe		q_cnt_2
#define	CQ_xri_aborted		q_cnt_3
#define	CQ_wq			q_cnt_4
/* defines for WQ stats */
#define	WQ_overflow		q_cnt_1
#define	WQ_posted		q_cnt_4
/* defines for RQ stats */
#define	RQ_no_posted_buf	q_cnt_1
#define	RQ_no_buf_found		q_cnt_2
#define	RQ_buf_trunc		q_cnt_3
#define	RQ_rcv_buf		q_cnt_4

	uint64_t isr_timestamp;
	struct lpfc_queue *assoc_qp;
	union sli4_qe qe[1];	/* array to index entries (must be last) */
};
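
/*
 * Note on the statistic names above: the EQ_/CQ_/WQ_/RQ_ defines are plain
 * aliases onto the generic q_cnt_* counters, so the same fields carry
 * type-specific meanings depending on the kind of queue.  A minimal reading
 * sketch (the variable "eq" is illustrative only):
 *
 *	struct lpfc_queue *eq;
 *	...
 *	uint64_t processed = eq->EQ_processed;	// expands to eq->q_cnt_4
 *	uint32_t no_entry  = eq->EQ_no_entry;	// expands to eq->q_cnt_2
 */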

struct lpfc_sli4_link {
	uint16_t speed;
	uint8_t duplex;
	uint8_t status;
	uint8_t type;
	uint8_t number;
	uint8_t fault;
	uint16_t logical_speed;
	uint16_t topology;
};

struct lpfc_fcf_rec {
	uint8_t  fabric_name[8];
	uint8_t  switch_name[8];
	uint8_t  mac_addr[6];
	uint16_t fcf_indx;
	uint32_t priority;
	uint16_t vlan_id;
	uint32_t addr_mode;
	uint32_t flag;
#define BOOT_ENABLE	0x01
#define RECORD_VALID	0x02
};

struct lpfc_fcf_pri_rec {
	uint16_t fcf_index;
#define LPFC_FCF_ON_PRI_LIST	0x0001
#define LPFC_FCF_FLOGI_FAILED	0x0002
	uint16_t flag;
	uint32_t priority;
};

struct lpfc_fcf_pri {
	struct list_head list;
	struct lpfc_fcf_pri_rec fcf_rec;
};

/*
 * Maximum FCF table index; it is for driver internal bookkeeping and
 * just needs to be no less than the supported HBA's FCF table size.
 */
#define LPFC_SLI4_FCF_TBL_INDX_MAX	32

struct lpfc_fcf {
	uint16_t fcfi;
	uint32_t fcf_flag;
#define FCF_AVAILABLE	0x01	/* FCF available for discovery */
#define FCF_REGISTERED	0x02	/* FCF registered with FW */
#define FCF_SCAN_DONE	0x04	/* FCF table scan done */
#define FCF_IN_USE	0x08	/* At least one discovery completed */
#define FCF_INIT_DISC	0x10	/* Initial FCF discovery */
#define FCF_DEAD_DISC	0x20	/* FCF DEAD fast FCF failover discovery */
#define FCF_ACVL_DISC	0x40	/* All CVL fast FCF failover discovery */
#define FCF_DISCOVERY	(FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
#define FCF_REDISC_PEND	0x80	/* FCF rediscovery pending */
#define FCF_REDISC_EVT	0x100	/* FCF rediscovery event to worker thread */
#define FCF_REDISC_FOV	0x200	/* Post FCF rediscovery fast failover */
#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
	uint32_t addr_mode;
	uint32_t eligible_fcf_cnt;
	struct lpfc_fcf_rec current_rec;
	struct lpfc_fcf_rec failover_rec;
	struct list_head fcf_pri_list;
	struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX];
	uint32_t current_fcf_scan_pri;
	struct timer_list redisc_wait;
	unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
};

#define LPFC_REGION23_SIGNATURE	"RG23"
#define LPFC_REGION23_VERSION	1
#define LPFC_REGION23_LAST_REC	0xff
#define DRIVER_SPECIFIC_TYPE	0xA2
#define LINUX_DRIVER_ID		0x20
#define PORT_STE_TYPE		0x1

struct lpfc_fip_param_hdr {
	uint8_t type;
#define FCOE_PARAM_TYPE		0xA0
	uint8_t length;
#define FCOE_PARAM_LENGTH	2
	uint8_t parm_version;
#define FIPP_VERSION		0x01
	uint8_t parm_flags;
#define	lpfc_fip_param_hdr_fipp_mode_SHIFT	6
#define	lpfc_fip_param_hdr_fipp_mode_MASK	0x3
#define lpfc_fip_param_hdr_fipp_mode_WORD	parm_flags
#define	FIPP_MODE_ON	0x1
#define	FIPP_MODE_OFF	0x0
#define FIPP_VLAN_VALID	0x1
};

struct lpfc_fcoe_params {
	uint8_t fc_map[3];
	uint8_t reserved1;
	uint16_t vlan_tag;
	uint8_t reserved[2];
};

struct lpfc_fcf_conn_hdr {
	uint8_t type;
#define FCOE_CONN_TBL_TYPE	0xA1
	uint8_t length;		/* words */
	uint8_t reserved[2];
};

struct lpfc_fcf_conn_rec {
	uint16_t flags;
#define	FCFCNCT_VALID		0x0001
#define	FCFCNCT_BOOT		0x0002
#define	FCFCNCT_PRIMARY		0x0004	/* if not set, Secondary */
#define	FCFCNCT_FBNM_VALID	0x0008
#define	FCFCNCT_SWNM_VALID	0x0010
#define	FCFCNCT_VLAN_VALID	0x0020
#define	FCFCNCT_AM_VALID	0x0040
#define	FCFCNCT_AM_PREFERRED	0x0080	/* if not set, AM Required */
#define	FCFCNCT_AM_SPMA		0x0100	/* if not set, FPMA */
	uint16_t vlan_tag;
	uint8_t fabric_name[8];
	uint8_t switch_name[8];
};

struct lpfc_fcf_conn_entry {
	struct list_head list;
	struct lpfc_fcf_conn_rec conn_rec;
};

/*
 * Define the host's bootstrap mailbox.  This structure contains
 * the member attributes needed to create, use, and destroy the
 * bootstrap mailbox region.
 *
 * The macro definitions for the bmbx data structure are defined
 * in lpfc_hw4.h with the register definition.
 */
struct lpfc_bmbx {
	struct lpfc_dmabuf *dmabuf;
	struct dma_address dma_address;
	void *avirt;
	dma_addr_t aphys;
	uint32_t bmbx_size;
};

#define LPFC_EQE_SIZE		LPFC_EQE_SIZE_4
#define LPFC_EQE_SIZE_4B	4
#define LPFC_EQE_SIZE_16B	16
#define LPFC_CQE_SIZE		16
#define LPFC_WQE_SIZE		64
#define LPFC_WQE128_SIZE	128
#define LPFC_MQE_SIZE		256
#define LPFC_RQE_SIZE		8

#define LPFC_EQE_DEF_COUNT	1024
#define LPFC_CQE_DEF_COUNT	1024
#define LPFC_WQE_DEF_COUNT	256
#define LPFC_WQE128_DEF_COUNT	128
#define LPFC_WQE128_MAX_COUNT	256
#define LPFC_MQE_DEF_COUNT	16
#define LPFC_RQE_DEF_COUNT	512

#define LPFC_QUEUE_NOARM	false
#define LPFC_QUEUE_REARM	true

/*
 * SLI4 CT field defines
 */
#define SLI4_CT_RPI	0
#define SLI4_CT_VPI	1
#define SLI4_CT_VFI	2
#define SLI4_CT_FCFI	3

/*
 * SLI4 specific data structures
 */
struct lpfc_max_cfg_param {
	uint16_t max_xri;
	uint16_t xri_base;
	uint16_t xri_used;
	uint16_t max_rpi;
	uint16_t rpi_base;
	uint16_t rpi_used;
	uint16_t max_vpi;
	uint16_t vpi_base;
	uint16_t vpi_used;
	uint16_t max_vfi;
	uint16_t vfi_base;
	uint16_t vfi_used;
	uint16_t max_fcfi;
	uint16_t fcfi_used;
	uint16_t max_eq;
	uint16_t max_rq;
	uint16_t max_cq;
	uint16_t max_wq;
};

struct lpfc_hba;

/* SLI4 HBA multi-fcp queue handler struct */
struct lpfc_hba_eq_hdl {
	uint32_t idx;
	struct lpfc_hba *phba;
	atomic_t hba_eq_in_use;
	struct cpumask *cpumask;
	/* CPU affinitized to or 0xffffffff if multiple */
	uint32_t cpu;
#define LPFC_MULTI_CPU_AFFINITY 0xffffffff
};

/* Port Capabilities for SLI4 Parameters */
struct lpfc_pc_sli4_params {
	uint32_t supported;
	uint32_t if_type;
	uint32_t sli_rev;
	uint32_t sli_family;
	uint32_t featurelevel_1;
	uint32_t featurelevel_2;
	uint32_t proto_types;
#define LPFC_SLI4_PROTO_FCOE	0x0000001
#define LPFC_SLI4_PROTO_FC	0x0000002
#define LPFC_SLI4_PROTO_NIC	0x0000004
#define LPFC_SLI4_PROTO_ISCSI	0x0000008
#define LPFC_SLI4_PROTO_RDMA	0x0000010
	uint32_t sge_supp_len;
	uint32_t if_page_sz;
	uint32_t rq_db_window;
	uint32_t loopbk_scope;
	uint32_t oas_supported;
	uint32_t eq_pages_max;
	uint32_t eqe_size;
	uint32_t cq_pages_max;
	uint32_t cqe_size;
	uint32_t mq_pages_max;
	uint32_t mqe_size;
	uint32_t mq_elem_cnt;
	uint32_t wq_pages_max;
	uint32_t wqe_size;
	uint32_t rq_pages_max;
	uint32_t rqe_size;
	uint32_t hdr_pages_max;
	uint32_t hdr_size;
	uint32_t hdr_pp_align;
	uint32_t sgl_pages_max;
	uint32_t sgl_pp_align;
	uint8_t cqv;
	uint8_t mqv;
	uint8_t wqv;
	uint8_t rqv;
	uint8_t wqsize;
#define LPFC_WQ_SZ64_SUPPORT	1
#define LPFC_WQ_SZ128_SUPPORT	2
	uint8_t wqpcnt;
};

struct lpfc_iov {
	uint32_t pf_number;
	uint32_t vf_number;
};

struct lpfc_sli4_lnk_info {
	uint8_t lnk_dv;
#define LPFC_LNK_DAT_INVAL	0
#define LPFC_LNK_DAT_VAL	1
	uint8_t lnk_tp;
#define LPFC_LNK_GE	0x0	/* FCoE */
#define LPFC_LNK_FC	0x1	/* FC */
	uint8_t lnk_no;
	uint8_t optic_state;
};

#define LPFC_SLI4_HANDLER_CNT		(LPFC_HBA_IO_CHAN_MAX+ \
					 LPFC_FOF_IO_CHAN_NUM)
#define LPFC_SLI4_HANDLER_NAME_SZ	16

/* Used for IRQ vector to CPU mapping */
struct lpfc_vector_map_info {
	uint16_t	phys_id;
	uint16_t	core_id;
	uint16_t	irq;
	uint16_t	channel_id;
};
#define LPFC_VECTOR_MAP_EMPTY	0xffff

/* SLI4 HBA data structure entries */
struct lpfc_sli4_hba {
	void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
					     PCI BAR0, config space registers */
	void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
					     PCI BAR1, control registers */
	void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
					     PCI BAR2, doorbell registers */
	union {
		struct {
			/* IF Type 0, BAR 0 PCI cfg space reg mem map */
			void __iomem *UERRLOregaddr;
			void __iomem *UERRHIregaddr;
			void __iomem *UEMASKLOregaddr;
			void __iomem *UEMASKHIregaddr;
		} if_type0;
		struct {
			/* IF Type 2, BAR 0 PCI cfg space reg mem map. */
			void __iomem *STATUSregaddr;
			void __iomem *CTRLregaddr;
			void __iomem *ERR1regaddr;
#define SLIPORT_ERR1_REG_ERR_CODE_1		0x1
#define SLIPORT_ERR1_REG_ERR_CODE_2		0x2
			void __iomem *ERR2regaddr;
#define SLIPORT_ERR2_REG_FW_RESTART		0x0
#define SLIPORT_ERR2_REG_FUNC_PROVISON		0x1
#define SLIPORT_ERR2_REG_FORCED_DUMP		0x2
#define SLIPORT_ERR2_REG_FAILURE_EQ		0x3
#define SLIPORT_ERR2_REG_FAILURE_CQ		0x4
#define SLIPORT_ERR2_REG_FAILURE_BUS		0x5
#define SLIPORT_ERR2_REG_FAILURE_RQ		0x6
		} if_type2;
	} u;

	/* IF type 0, BAR1 and if type 2, Bar 0 CSR register memory map */
	void __iomem *PSMPHRregaddr;

	/* Well-known SLI INTF register memory map. */
	void __iomem *SLIINTFregaddr;

	/* IF type 0, BAR 1 function CSR register memory map */
	void __iomem *ISRregaddr;	/* HST_ISR register */
	void __iomem *IMRregaddr;	/* HST_IMR register */
	void __iomem *ISCRregaddr;	/* HST_ISCR register */
	/* IF type 0, BAR 0 and if type 2, BAR 0 doorbell register memory map */
	void __iomem *RQDBregaddr;	/* RQ_DOORBELL register */
	void __iomem *WQDBregaddr;	/* WQ_DOORBELL register */
	void __iomem *EQCQDBregaddr;	/* EQCQ_DOORBELL register */
	void __iomem *MQDBregaddr;	/* MQ_DOORBELL register */
	void __iomem *BMBXregaddr;	/* BootStrap MBX register */

	uint32_t ue_mask_lo;
	uint32_t ue_mask_hi;
	uint32_t ue_to_sr;
	uint32_t ue_to_rp;
	struct lpfc_register sli_intf;
	struct lpfc_pc_sli4_params pc_sli4_params;
	uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
	struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */

	/* Pointers to the constructed SLI4 queues */
	struct lpfc_queue **hba_eq;	/* Event queues for HBA */
	struct lpfc_queue **fcp_cq;	/* Fast-path FCP compl queue */
	struct lpfc_queue **nvme_cq;	/* Fast-path NVME compl queue */
	struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */
	struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */
	struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */
	struct lpfc_queue **fcp_wq;	/* Fast-path FCP work queue */
	struct lpfc_queue **nvme_wq;	/* Fast-path NVME work queue */
	uint16_t *fcp_cq_map;
	uint16_t *nvme_cq_map;
	struct list_head lpfc_wq_list;

	struct lpfc_queue *mbx_cq;	/* Slow-path mailbox complete queue */
	struct lpfc_queue *els_cq;	/* Slow-path ELS response complete queue */
	struct lpfc_queue *nvmels_cq;	/* NVME LS complete queue */
	struct lpfc_queue *mbx_wq;	/* Slow-path MBOX work queue */
	struct lpfc_queue *els_wq;	/* Slow-path ELS work queue */
	struct lpfc_queue *nvmels_wq;	/* NVME LS work queue */
	struct lpfc_queue *hdr_rq;	/* Slow-path Header Receive queue */
	struct lpfc_queue *dat_rq;	/* Slow-path Data Receive queue */

	struct lpfc_name wwnn;
	struct lpfc_name wwpn;

	uint32_t fw_func_mode;	/* FW function protocol mode */
	uint32_t ulp0_mode;	/* ULP0 protocol mode */
	uint32_t ulp1_mode;	/* ULP1 protocol mode */

	struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */

	/* Optimized Access Storage specific queues/structures */
	struct lpfc_queue *oas_cq; /* OAS completion queue */
	struct lpfc_queue *oas_wq; /* OAS Work queue */
	struct lpfc_sli_ring *oas_ring;
	uint64_t oas_next_lun;
	uint8_t oas_next_tgt_wwpn[8];
	uint8_t oas_next_vpt_wwpn[8];

	/* Setup information for various queue parameters */
	int eq_esize;
	int eq_ecount;
	int cq_esize;
	int cq_ecount;
	int wq_esize;
	int wq_ecount;
	int mq_esize;
	int mq_ecount;
	int rq_esize;
	int rq_ecount;
#define LPFC_SP_EQ_MAX_INTR_SEC		10000
#define LPFC_FP_EQ_MAX_INTR_SEC		10000

	uint32_t intr_enable;
	struct lpfc_bmbx bmbx;
	struct lpfc_max_cfg_param max_cfg_param;
	uint16_t extents_in_use;	/* must allocate resource extents. */
	uint16_t rpi_hdrs_in_use;	/* must post rpi hdrs if set. */
	uint16_t next_xri;	/* last_xri - max_cfg_param.xri_base = used */
	uint16_t next_rpi;
	uint16_t nvme_xri_max;
	uint16_t nvme_xri_cnt;
	uint16_t nvme_xri_start;
	uint16_t scsi_xri_max;
	uint16_t scsi_xri_cnt;
	uint16_t scsi_xri_start;
	uint16_t els_xri_cnt;
	uint16_t nvmet_xri_cnt;
	struct list_head lpfc_els_sgl_list;
	struct list_head lpfc_abts_els_sgl_list;
	struct list_head lpfc_nvmet_sgl_list;
	struct list_head lpfc_abts_nvmet_sgl_list;
	struct list_head lpfc_abts_scsi_buf_list;
	struct list_head lpfc_abts_nvme_buf_list;
	struct lpfc_sglq **lpfc_sglq_active_list;
	struct list_head lpfc_rpi_hdr_list;
	unsigned long *rpi_bmask;
	uint16_t *rpi_ids;
	uint16_t rpi_count;
	struct list_head lpfc_rpi_blk_list;
	unsigned long *xri_bmask;
	uint16_t *xri_ids;
	struct list_head lpfc_xri_blk_list;
	unsigned long *vfi_bmask;
	uint16_t *vfi_ids;
	uint16_t vfi_count;
	struct list_head lpfc_vfi_blk_list;
	struct lpfc_sli4_flags sli4_flags;
	struct list_head sp_queue_event;
	struct list_head sp_cqe_event_pool;
	struct list_head sp_asynce_work_queue;
	struct list_head sp_fcp_xri_aborted_work_queue;
	struct list_head sp_els_xri_aborted_work_queue;
	struct list_head sp_unsol_work_queue;
	struct lpfc_sli4_link link_state;
	struct lpfc_sli4_lnk_info lnk_info;
	uint32_t pport_name_sta;
#define LPFC_SLI4_PPNAME_NON	0
#define LPFC_SLI4_PPNAME_GET	1
	struct lpfc_iov iov;
	spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */
	spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
	spinlock_t sgl_list_lock;	/* list of aborted ELS IOs */
	spinlock_t nvmet_io_lock;
	uint32_t physical_port;

	/* CPU to vector mapping information */
	struct lpfc_vector_map_info *cpu_map;
	uint16_t num_online_cpu;
	uint16_t num_present_cpu;
	uint16_t curr_disp_cpu;
	uint16_t nvmet_mrq_post_idx;
};

enum lpfc_sge_type {
	GEN_BUFF_TYPE,
	SCSI_BUFF_TYPE,
	NVMET_BUFF_TYPE
};

enum lpfc_sgl_state {
	SGL_FREED,
	SGL_ALLOCATED,
	SGL_XRI_ABORTED
};

struct lpfc_sglq {
	/* lpfc_sglqs are used in double linked lists */
	struct list_head list;
	struct list_head clist;
	enum lpfc_sge_type buff_type;	/* is this a scsi sgl */
	enum lpfc_sgl_state state;
	struct lpfc_nodelist *ndlp;	/* ndlp associated with IO */
	uint16_t iotag;			/* pre-assigned IO tag */
	uint16_t sli4_lxritag;		/* logical pre-assigned xri. */
	uint16_t sli4_xritag;		/* pre-assigned XRI, (OXID) tag. */
	struct sli4_sge *sgl;		/* pre-assigned SGL */
	void *virt;			/* virtual address. */
	dma_addr_t phys;		/* physical address */
};

struct lpfc_rpi_hdr {
	struct list_head list;
	uint32_t len;
	struct lpfc_dmabuf *dmabuf;
	uint32_t page_count;
	uint32_t start_rpi;
};

struct lpfc_rsrc_blks {
	struct list_head list;
	uint16_t rsrc_start;
	uint16_t rsrc_size;
	uint16_t rsrc_used;
};

struct lpfc_rdp_context {
	struct lpfc_nodelist *ndlp;
	uint16_t ox_id;
	uint16_t rx_id;
	READ_LNK_VAR link_stat;
	uint8_t page_a0[DMP_SFF_PAGE_A0_SIZE];
	uint8_t page_a2[DMP_SFF_PAGE_A2_SIZE];
	void (*cmpl)(struct lpfc_hba *, struct lpfc_rdp_context *, int);
};

struct lpfc_lcb_context {
	uint8_t  sub_command;
	uint8_t  type;
	uint8_t  frequency;
	uint16_t ox_id;
	uint16_t rx_id;
	struct lpfc_nodelist *ndlp;
};

/*
 * SLI4 specific function prototypes
 */
int lpfc_pci_function_reset(struct lpfc_hba *);
int lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *);
int lpfc_sli4_hba_setup(struct lpfc_hba *);
int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
		     uint8_t, uint32_t, bool);
void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
			   struct lpfc_mbx_sge *);
int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
			       uint16_t);

void lpfc_sli4_hba_reset(struct lpfc_hba *);
struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
					 uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq);
int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
		   struct lpfc_queue *, uint32_t, uint32_t);
int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
			struct lpfc_queue **eqp, uint32_t type,
			uint32_t subtype);
int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
		       struct lpfc_queue *, uint32_t);
int lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
		   struct lpfc_queue *, uint32_t);
int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
		   struct lpfc_queue *, struct lpfc_queue *, uint32_t);
int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
			struct lpfc_queue **drqp, struct lpfc_queue **cqp,
			uint32_t subtype);
void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
int lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
int lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
		    struct lpfc_queue *);
int lpfc_sli4_queue_setup(struct lpfc_hba *);
void lpfc_sli4_queue_unset(struct lpfc_hba *);
int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
int lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
void lpfc_sli4_free_xri(struct lpfc_hba *, int);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
void lpfc_sli4_remove_rpis(struct lpfc_hba *);
void lpfc_sli4_async_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
			void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
			       struct sli4_wcqe_xri_aborted *);
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
			       struct sli4_wcqe_xri_aborted *);
void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *);
int lpfc_sli4_brdreset(struct lpfc_hba *);
int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba);
int lpfc_sli4_init_vpi(struct lpfc_vport *);
uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t);
void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
int lpfc_sli4_post_status_check(struct lpfc_hba *);
uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);