/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016 - 2018 Cavium Inc.
 */
#ifndef _QEDFC_H_
#define _QEDFC_H_

#include <scsi/libfcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_fc2.h>
#include <scsi/scsi_tcq.h>

/* qedf_hsi.h needs to be included before any qed includes */
#include "qedf_hsi.h"

#include <linux/qed/qed_if.h>
#include <linux/qed/qed_fcoe_if.h>
#include <linux/qed/qed_ll2_if.h>
#include "qedf_version.h"
#include "qedf_dbg.h"
#include "drv_fcoe_fw_funcs.h"
/* Helpers to extract upper and lower 32-bits of pointer */
#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
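/*
 * Illustrative use (field names hypothetical): firmware descriptors carry
 * DMA addresses as two little-endian 32-bit halves, e.g.
 *
 *	bd->addr.hi = cpu_to_le32(U64_HI(req_buf_dma));
 *	bd->addr.lo = cpu_to_le32(U64_LO(req_buf_dma));
 */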
#define QEDF_DESCR "QLogic FCoE Offload Driver"
#define QEDF_MODULE_NAME "qedf"

#define QEDF_FLOGI_RETRY_CNT	3
#define QEDF_RPORT_RETRY_CNT	255
#define QEDF_MAX_SESSIONS	1024
#define QEDF_MAX_PAYLOAD	2048
#define QEDF_MAX_BDS_PER_CMD	256
#define QEDF_MAX_BD_LEN		0xffff
#define QEDF_BD_SPLIT_SZ	0x1000
#define QEDF_PAGE_SIZE		4096
#define QED_HW_DMA_BOUNDARY	0xfff
#define QEDF_MAX_SGLEN_FOR_CACHESGL	((1U << 16) - 1)
#define QEDF_MFS	(QEDF_MAX_PAYLOAD + \
	sizeof(struct fc_frame_header))
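/*
 * struct fc_frame_header is 24 bytes, so QEDF_MFS works out to 2072 bytes
 * (matching QEDF_BDQ_BUF_SIZE below).
 */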
#define QEDF_MAX_NPIV		64
#define QEDF_TM_TIMEOUT		10
#define QEDF_ABORT_TIMEOUT	(10 * 1000)
#define QEDF_CLEANUP_TIMEOUT	1
#define QEDF_MAX_CDB_LEN	16

#define QEDF_LL2_BUF_SIZE	2500	/* Buffer size required for LL2 Rx */

#define UPSTREAM_REMOVE		1
#define UPSTREAM_KEEP		1
struct qedf_mp_req {
	uint32_t req_len;
	void *req_buf;
	dma_addr_t req_buf_dma;
	struct scsi_sge *mp_req_bd;
	dma_addr_t mp_req_bd_dma;
	struct fc_frame_header req_fc_hdr;

	uint32_t resp_len;
	void *resp_buf;
	dma_addr_t resp_buf_dma;
	struct scsi_sge *mp_resp_bd;
	dma_addr_t mp_resp_bd_dma;
	struct fc_frame_header resp_fc_hdr;
};
struct qedf_els_cb_arg {
	struct qedf_ioreq *aborted_io_req;
	struct qedf_ioreq *io_req;
	u8 op; /* Used to keep track of ELS op */
	uint16_t l2_oxid;
	u32 offset; /* Used for sequence cleanup */
	u8 r_ctl; /* Used for sequence cleanup */
};
enum qedf_ioreq_event {
	QEDF_IOREQ_EV_NONE,
	QEDF_IOREQ_EV_ABORT_SUCCESS,
	QEDF_IOREQ_EV_ABORT_FAILED,
	QEDF_IOREQ_EV_SEND_RRQ,
	QEDF_IOREQ_EV_ELS_TMO,
	QEDF_IOREQ_EV_ELS_ERR_DETECT,
	QEDF_IOREQ_EV_ELS_FLUSH,
	QEDF_IOREQ_EV_CLEANUP_SUCCESS,
	QEDF_IOREQ_EV_CLEANUP_FAILED,
};
#define FC_GOOD		0
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER	(0x1<<2)
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER	(0x1<<3)
#define CMD_SCSI_STATUS(Cmnd)			((Cmnd)->SCp.Status)
#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID	(0x1<<0)
#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID	(0x1<<1)
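/*
 * Illustrative check (sketch only), assuming rsp_flags holds the FCP_RSP
 * 'flags' byte parsed from the response IU:
 *
 *	if (rsp_flags & FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER)
 *		io_req->fcp_resid = <residual count from the FCP_RSP IU>;
 */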
struct qedf_ioreq {
	struct list_head link;
	uint16_t xid;
	struct scsi_cmnd *sc_cmd;
#define QEDF_SCSI_CMD		1
#define QEDF_TASK_MGMT_CMD	2
#define QEDF_ABTS		3
#define QEDF_ELS		4
#define QEDF_CLEANUP		5
#define QEDF_SEQ_CLEANUP	6
	u8 cmd_type;
#define QEDF_CMD_OUTSTANDING	0x0
#define QEDF_CMD_IN_ABORT	0x1
#define QEDF_CMD_IN_CLEANUP	0x2
#define QEDF_CMD_SRR_SENT	0x3
#define QEDF_CMD_DIRTY		0x4
#define QEDF_CMD_ERR_SCSI_DONE	0x5
	u8 io_req_flags;
	uint8_t tm_flags;
	struct qedf_rport *fcport;
#define	QEDF_CMD_ST_INACTIVE		0
#define	QEDFC_CMD_ST_IO_ACTIVE		1
#define	QEDFC_CMD_ST_ABORT_ACTIVE	2
#define	QEDFC_CMD_ST_ABORT_ACTIVE_EH	3
#define	QEDFC_CMD_ST_CLEANUP_ACTIVE	4
#define	QEDFC_CMD_ST_CLEANUP_ACTIVE_EH	5
#define	QEDFC_CMD_ST_RRQ_ACTIVE		6
#define	QEDFC_CMD_ST_RRQ_WAIT		7
#define	QEDFC_CMD_ST_OXID_RETIRE_WAIT	8
#define	QEDFC_CMD_ST_TMF_ACTIVE		9
#define	QEDFC_CMD_ST_DRAIN_ACTIVE	10
#define	QEDFC_CMD_ST_CLEANED		11
#define	QEDFC_CMD_ST_ELS_ACTIVE		12
	atomic_t state;
	unsigned long flags;
	enum qedf_ioreq_event event;
	size_t data_xfer_len;
	/* ID: 001: Alloc cmd (qedf_alloc_cmd) */
	/* ID: 002: Initiate ABTS (qedf_initiate_abts) */
	/* ID: 003: For RRQ (qedf_process_abts_compl) */
	struct kref refcount;
	struct qedf_cmd_mgr *cmd_mgr;
	struct io_bdt *bd_tbl;
	struct delayed_work timeout_work;
	struct completion tm_done;
	struct completion abts_done;
	struct completion cleanup_done;
	struct e4_fcoe_task_context *task;
	struct fcoe_task_params *task_params;
	struct scsi_sgl_task_params *sgl_task_params;
	int idx;
	int lun;
	/*
	 * Need to allocate enough room for both sense data and FCP response
	 * data, which has a max length of 8 bytes according to spec.
	 */
#define QEDF_SCSI_SENSE_BUFFERSIZE	(SCSI_SENSE_BUFFERSIZE + 8)
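	/*
	 * With SCSI_SENSE_BUFFERSIZE defined as 96 bytes in current kernels,
	 * this comes to 104 bytes per command.
	 */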
	uint8_t *sense_buffer;
	dma_addr_t sense_buffer_dma;
	u32 fcp_resid;
	u32 fcp_rsp_len;
	u32 fcp_sns_len;
	u8 cdb_status;
	u8 fcp_status;
	u8 fcp_rsp_code;
	u8 scsi_comp_flags;
#define QEDF_MAX_REUSE		0xfff
	u16 reuse_count;
	struct qedf_mp_req mp_req;
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg);
	struct qedf_els_cb_arg *cb_arg;
	int fp_idx;
	unsigned int cpu;
	unsigned int int_cpu;
#define QEDF_IOREQ_UNKNOWN_SGE		1
#define QEDF_IOREQ_SLOW_SGE		2
#define QEDF_IOREQ_FAST_SGE		3
	u8 sge_type;
	struct delayed_work rrq_work;

	/* Used for sequence level recovery; i.e. REC/SRR */
	uint32_t rx_buf_off;
	uint32_t tx_buf_off;
	uint32_t rx_id;
	uint32_t task_retry_identifier;

	/*
	 * Used to tell if we need to return a SCSI command
	 * during some form of error processing.
	 */
	bool return_scsi_cmd_on_abts;

	unsigned int alloc;
};
extern struct workqueue_struct *qedf_io_wq;

struct qedf_rport {
	spinlock_t rport_lock;
#define QEDF_RPORT_SESSION_READY		1
#define QEDF_RPORT_UPLOADING_CONNECTION		2
#define QEDF_RPORT_IN_RESET			3
#define QEDF_RPORT_IN_LUN_RESET			4
#define QEDF_RPORT_IN_TARGET_RESET		5
	unsigned long flags;
	int lun_reset_lun;
	unsigned long retry_delay_timestamp;
	struct fc_rport *rport;
	struct fc_rport_priv *rdata;
	struct qedf_ctx *qedf;
	u32 handle; /* Handle from qed */
	u32 fw_cid; /* fw_cid from qed */
	void __iomem *p_doorbell;

	/* Send queue management */
	atomic_t free_sqes;
	atomic_t ios_to_queue;
	atomic_t num_active_ios;
	struct fcoe_wqe *sq;
	dma_addr_t sq_dma;
	u16 sq_prod_idx;
	u16 fw_sq_prod_idx;
	u16 sq_con_idx;
	u32 sq_mem_size;
	void *sq_pbl;
	dma_addr_t sq_pbl_dma;
	u32 sq_pbl_size;
	u32 sid;
#define	QEDF_RPORT_TYPE_DISK		0
#define	QEDF_RPORT_TYPE_TAPE		1
	uint dev_type; /* Disk or tape */
	struct list_head peers;
};
/* Used to contain LL2 skb's in ll2_skb_list */
struct qedf_skb_work {
	struct work_struct work;
	struct sk_buff *skb;
	struct qedf_ctx *qedf;
};
struct qedf_fastpath {
#define	QEDF_SB_ID_NULL		0xffff
	u16 sb_id;
	struct qed_sb_info *sb_info;
	struct qedf_ctx *qedf;
	/* Keep track of number of completions on this fastpath */
	unsigned long completions;
	uint32_t cq_num_entries;
};
/* Used to pass fastpath information needed to process CQEs */
struct qedf_io_work {
	struct work_struct work;
	struct fcoe_cqe cqe;
	struct qedf_ctx *qedf;
	struct fc_frame *fp;
};

struct qedf_glbl_q_params {
	u64 hw_p_cq;	/* Completion queue PBL */
	u64 hw_p_rq;	/* Request queue PBL */
	u64 hw_p_cmdq;	/* Command queue PBL */
};
struct global_queue {
	struct fcoe_cqe *cq;
	dma_addr_t cq_dma;
	u32 cq_mem_size;
	u32 cq_cons_idx; /* Completion queue consumer index */
	u32 cq_prod_idx;

	void *cq_pbl;
	dma_addr_t cq_pbl_dma;
	u32 cq_pbl_size;
};
/* I/O tracing entry */
#define QEDF_IO_TRACE_SIZE		2048
struct qedf_io_log {
#define QEDF_IO_TRACE_REQ		0
#define QEDF_IO_TRACE_RSP		1
	uint8_t direction;
	uint16_t task_id;
	uint32_t port_id; /* Remote port fabric ID */
	int lun;
	unsigned char op; /* SCSI CDB */
	uint8_t lba[4];
	unsigned int bufflen; /* SCSI buffer length */
	unsigned int sg_count; /* Number of SG elements */
	int result; /* Result passed back to mid-layer */
	unsigned long jiffies; /* Time stamp when I/O logged */
	int refcount; /* Reference count for task id */
	unsigned int req_cpu; /* CPU that the task is queued on */
	unsigned int int_cpu; /* Interrupt CPU that the task is received on */
	unsigned int rsp_cpu; /* CPU that task is returned on */
	u8 sge_type; /* Did we take the slow, single or fast SGE path */
};
/* Number of entries in BDQ */
#define QEDF_BDQ_SIZE			256
#define QEDF_BDQ_BUF_SIZE		2072

/* DMA coherent buffers for BDQ */
struct qedf_bdq_buf {
	void *buf_addr;
	dma_addr_t buf_dma;
};
/* Main adapter struct */
struct qedf_ctx {
	struct qedf_dbg_ctx dbg_ctx;
	struct fcoe_ctlr ctlr;
	struct fc_lport *lport;
	u8 data_src_addr[ETH_ALEN];
#define QEDF_LINK_DOWN		0
#define QEDF_LINK_UP		1
	atomic_t link_state;
#define QEDF_DCBX_PENDING	0
#define QEDF_DCBX_DONE		1
	atomic_t dcbx;
#define QEDF_NULL_VLAN_ID	-1
#define QEDF_FALLBACK_VLAN	1002
#define QEDF_DEFAULT_PRIO	3
	int vlan_id;
	u8 prio;
	struct qed_dev *cdev;
	struct qed_dev_fcoe_info dev_info;
	struct qed_int_info int_info;
	uint16_t last_command;
	spinlock_t hba_lock;
	struct pci_dev *pdev;
	u64 wwnn;
	u64 wwpn;
	u8 __aligned(16) mac[ETH_ALEN];
	struct list_head fcports;
	atomic_t num_offloads;
	unsigned int curr_conn_id;
	struct workqueue_struct *ll2_recv_wq;
	struct workqueue_struct *link_update_wq;
	struct delayed_work link_update;
	struct delayed_work link_recovery;
	struct completion flogi_compl;
	struct completion fipvlan_compl;
	/*
	 * Used to tell if we're in the window where we are waiting for
	 * the link to come back up before informing fcoe that the link is
	 * done.
	 */
	atomic_t link_down_tmo_valid;
#define QEDF_TIMER_INTERVAL	(1 * HZ)
	struct timer_list timer; /* One second book keeping timer */
#define QEDF_DRAIN_ACTIVE		1
#define QEDF_LL2_STARTED		2
#define QEDF_UNLOADING			3
#define QEDF_GRCDUMP_CAPTURE		4
#define QEDF_IN_RECOVERY		5
#define QEDF_DBG_STOP_IO		6
#define QEDF_PROBING			8
	unsigned long flags; /* Miscellaneous state flags */
	int fipvlan_retries;
	u8 num_queues;
	struct global_queue **global_queues;
	/* Pointer to array of queue structures */
	struct qedf_glbl_q_params *p_cpuq;
	/* Physical address of array of queue structures */
	dma_addr_t hw_p_cpuq;
	struct qedf_bdq_buf bdq[QEDF_BDQ_SIZE];
	void *bdq_pbl;
	dma_addr_t bdq_pbl_dma;
	size_t bdq_pbl_mem_size;
	void *bdq_pbl_list;
	dma_addr_t bdq_pbl_list_dma;
	u8 bdq_pbl_list_num_entries;
	void __iomem *bdq_primary_prod;
	void __iomem *bdq_secondary_prod;
	uint16_t bdq_prod_idx;

	/* Structure for holding all the fastpath for this qedf_ctx */
	struct qedf_fastpath *fp_array;
	struct qed_fcoe_tid tasks;
	struct qedf_cmd_mgr *cmd_mgr;
	/* Holds the PF parameters we pass to qed to start the FCoE function */
	struct qed_pf_params pf_params;
	/* Used to time middle path ELS and TM commands */
	struct workqueue_struct *timer_work_queue;
#define QEDF_IO_WORK_MIN		64
	mempool_t *io_mempool;
	struct workqueue_struct *dpc_wq;
	struct delayed_work recovery_work;
	struct delayed_work board_disable_work;
	struct delayed_work grcdump_work;
	struct delayed_work stag_work;
	u32 slow_sge_ios;
	u32 fast_sge_ios;

	uint8_t	*grcdump;
	uint32_t grcdump_size;

	struct qedf_io_log io_trace_buf[QEDF_IO_TRACE_SIZE];
	spinlock_t io_trace_lock;
	uint16_t io_trace_idx;

	bool stop_io_on_error;

	u32 flogi_cnt;
	u32 flogi_failed;
	u32 flogi_pending;

	/* Used for fc statistics */
	struct mutex stats_mutex;
	u64 input_requests;
	u64 output_requests;
	u64 control_requests;
	u64 packet_aborts;
	u64 alloc_failures;
	u8 lun_resets;
	u8 target_resets;
	u8 task_set_fulls;
	u8 busy;

	/* Used for flush routine */
	struct mutex flush_mutex;
};
struct io_bdt {
	struct qedf_ioreq *io_req;
	struct scsi_sge *bd_tbl;
	dma_addr_t bd_tbl_dma;
	u16 bd_valid;
};
struct qedf_cmd_mgr {
	struct qedf_ctx *qedf;
	u16 idx;
	struct io_bdt **io_bdt_pool;
#define FCOE_PARAMS_NUM_TASKS		2048
	struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS];
	spinlock_t lock;
	atomic_t free_list_cnt;
};
/* Stolen from qed_cxt_api.h and adapted for qed_fcoe_info
 * Usage:
 *
 * void *ptr;
 * ptr = qedf_get_task_mem(&qedf->tasks, 128);
 */
static inline void *qedf_get_task_mem(struct qed_fcoe_tid *info, u32 tid)
{
	return (void *)(info->blocks[tid / info->num_tids_per_block] +
			(tid % info->num_tids_per_block) * info->size);
}
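
/*
 * Task contexts live in fixed-size blocks: tid / num_tids_per_block selects
 * the block, and the remainder, scaled by the per-task size, is the byte
 * offset within it. Illustrative numbers: with num_tids_per_block = 64 and
 * size = 128, tid 130 resolves to blocks[2] + 2 * 128.
 */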
static inline void qedf_stop_all_io(struct qedf_ctx *qedf)
{
	set_bit(QEDF_DBG_STOP_IO, &qedf->flags);
}

/*
 * Externs
 */
/*
 * (QEDF_LOG_NPIV | QEDF_LOG_SESS | QEDF_LOG_LPORT | QEDF_LOG_ELS | QEDF_LOG_MQ
 * | QEDF_LOG_IO | QEDF_LOG_UNSOL | QEDF_LOG_SCSI_TM | QEDF_LOG_MP_REQ |
 * QEDF_LOG_EVT | QEDF_LOG_CONN | QEDF_LOG_DISC | QEDF_LOG_INFO)
 */
#define QEDF_DEFAULT_LOG_MASK		0x3CFB6
extern const struct qed_fcoe_ops *qed_ops;

extern uint qedf_dump_frames;
extern uint qedf_io_tracing;
extern uint qedf_stop_io_on_error;
extern uint qedf_link_down_tmo;
#define QEDF_RETRY_DELAY_MAX		600 /* 60 seconds */
extern bool qedf_retry_delay;
extern uint qedf_debug;
extern struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf);
extern void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr);
extern int qedf_queuecommand(struct Scsi_Host *host,
	struct scsi_cmnd *sc_cmd);
extern void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
extern u8 *qedf_get_src_mac(struct fc_lport *lport);
extern void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb);
extern void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf);
extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req);
extern void qedf_process_warning_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern void qedf_process_error_detect(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern void qedf_flush_active_ios(struct qedf_rport *fcport, int lun);
extern void qedf_release_cmd(struct kref *ref);
extern int qedf_initiate_abts(struct qedf_ioreq *io_req,
	bool return_scsi_cmd_on_abts);
extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req);
extern struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport,
	u8 cmd_type);
extern struct device_attribute *qedf_host_attrs[];
extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
	struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req);
extern int qedf_send_rrq(struct qedf_ioreq *aborted_io_req);
extern int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp);
extern int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
	bool return_scsi_cmd_on_abts);
extern void qedf_process_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags);
extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req);
extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
extern void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	int result);
extern void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id);
extern void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf);
extern void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf);
extern void qedf_capture_grc_dump(struct qedf_ctx *qedf);
bool qedf_wait_for_upload(struct qedf_ctx *qedf);
extern void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
	struct fcoe_cqe *cqe);
extern void qedf_restart_rport(struct qedf_rport *fcport);
extern int qedf_send_rec(struct qedf_ioreq *orig_io_req);
extern int qedf_post_io_req(struct qedf_rport *fcport,
	struct qedf_ioreq *io_req);
extern void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern int qedf_send_flogi(struct qedf_ctx *qedf);
extern void qedf_get_protocol_tlv_data(void *dev, void *data);
extern void qedf_fp_io_handler(struct work_struct *work);
extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
extern void qedf_wq_grcdump(struct work_struct *work);
void qedf_stag_change_work(struct work_struct *work);
void qedf_ctx_soft_reset(struct fc_lport *lport);
extern void qedf_board_disable_work(struct work_struct *work);
extern void qedf_schedule_hw_err_handler(void *dev,
	enum qed_hw_err_type err_type);
#define FCOE_WORD_TO_BYTE	4
#define QEDF_MAX_TASK_NUM	0xFFFF
#define QL45xxx			0x165C
#define QL41xxx			0x8080
#define MAX_CT_PAYLOAD		2048
#define DISCOVERED_PORTS	4
#define NUMBER_OF_PORTS		1
struct fip_vlan {
	struct ethhdr eth;
	struct fip_header fip;
	struct {
		struct fip_mac_desc mac;
		struct fip_wwn_desc wwnn;
	} desc;
};
/* SQ/CQ Sizes */
#define GBL_RSVD_TASKS			16
#define NUM_TASKS_PER_CONNECTION	1024
#define NUM_RW_TASKS_PER_CONNECTION	512
#define FCOE_PARAMS_CQ_NUM_ENTRIES	FCOE_PARAMS_NUM_TASKS

#define FCOE_PARAMS_CMDQ_NUM_ENTRIES	FCOE_PARAMS_NUM_TASKS
#define SQ_NUM_ENTRIES			NUM_TASKS_PER_CONNECTION

#define QEDF_FCOE_PARAMS_GL_RQ_PI	0
#define QEDF_FCOE_PARAMS_GL_CMD_PI	1

#define QEDF_READ			(1 << 1)
#define QEDF_WRITE			(1 << 0)
#define MAX_FIBRE_LUNS			0xffffffff

#define MIN_NUM_CPUS_MSIX(x)	min_t(u32, x->dev_info.num_cqs, \
					num_online_cpus())
/*
 * PCI function probe defines
 */
/* Probe/remove called during normal PCI probe */
#define	QEDF_MODE_NORMAL	0
/* Probe/remove called from qed error recovery */
#define QEDF_MODE_RECOVERY	1

#define SUPPORTED_25000baseKR_Full	(1<<27)
#define SUPPORTED_50000baseKR2_Full	(1<<28)
#define SUPPORTED_100000baseKR4_Full	(1<<29)
#define SUPPORTED_100000baseCR4_Full	(1<<30)

#endif