Merge patch series "lpfc: Update lpfc to revision 14.2.0.12"

Justin Tee <justintee8345@gmail.com> says:

Update lpfc to revision 14.2.0.12

This patch set contains fixes flagged by code analyzer tools, introduces a
new CQE status to handle DMA errors, and replaces the usage of blk interrupts
with threaded interrupts.

The patches were cut against Martin's 6.4/scsi-queue tree.

Link: https://lore.kernel.org/r/20230417191558.83100-1-justintee8345@gmail.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 808e87a511
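For readers unfamiliar with the threaded-interrupt conversion, the sketch below shows the generic kernel pattern this series moves to: request_irq() replaced by request_threaded_irq() with IRQF_ONESHOT, a hard handler that returns IRQ_WAKE_THREAD, and a thread handler that does the heavy completion work. It is illustrative only; the demo_* names are hypothetical and are not lpfc identifiers. The driver's own handlers (lpfc_sli4_hba_intr_handler and the new lpfc_sli4_hba_intr_handler_th in the hunks below) additionally gate the threaded path on the EQ's poll_mode and on congestion-management state.

/* Minimal sketch of the threaded-IRQ pattern (illustrative, not lpfc code). */
#include <linux/interrupt.h>

/* Hard handler: runs in hard-irq context; defer the heavy work. */
static irqreturn_t demo_hard_handler(int irq, void *dev_id)
{
	/* Quick checks only, then ask the core to wake the IRQ thread. */
	return IRQ_WAKE_THREAD;
}

/* Thread handler: runs in process context; with IRQF_ONESHOT the line
 * stays masked until this handler returns.
 */
static irqreturn_t demo_thread_handler(int irq, void *dev_id)
{
	/* Drain the event queue and process completions here. */
	return IRQ_HANDLED;
}

static int demo_setup_irq(unsigned int irq, void *dev_id)
{
	/* Was: request_irq(irq, demo_hard_handler, 0, "demo", dev_id); */
	return request_threaded_irq(irq, demo_hard_handler,
				    demo_thread_handler, IRQF_ONESHOT,
				    "demo", dev_id);
}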
@@ -5858,8 +5858,8 @@ int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
module_param(lpfc_fabric_cgn_frequency, int, 0444);
MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq");

int lpfc_acqe_cgn_frequency = 10; /* 10 sec default */
module_param(lpfc_acqe_cgn_frequency, int, 0444);
unsigned char lpfc_acqe_cgn_frequency = 10; /* 10 sec default */
module_param(lpfc_acqe_cgn_frequency, byte, 0444);
MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq");

int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */
@@ -134,7 +134,6 @@ void lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp);
void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb);
int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
void lpfc_disc_list_loopmap(struct lpfc_vport *);
void lpfc_disc_start(struct lpfc_vport *);
@@ -248,6 +247,7 @@ irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_intr_handler(int, void *);
irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id);

int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap,
uint32_t len);
@@ -664,7 +664,7 @@ extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
extern int lpfc_no_hba_reset_cnt;
extern unsigned long lpfc_no_hba_reset[];
extern int lpfc_acqe_cgn_frequency;
extern unsigned char lpfc_acqe_cgn_frequency;
extern int lpfc_fabric_cgn_frequency;
extern int lpfc_use_cgn_signal;
@@ -5205,14 +5205,9 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
*
* This routine is the completion callback function to the Logout (LOGO)
* Accept (ACC) Response ELS command. This routine is invoked to indicate
* the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
* release the ndlp if it has the last reference remaining (reference count
* is 1). If succeeded (meaning ndlp released), it sets the iocb ndlp
* field to NULL to inform the following lpfc_els_free_iocb() routine no
* ndlp reference count needs to be decremented. Otherwise, the ndlp
* reference use-count shall be decremented by the lpfc_els_free_iocb()
* routine. Finally, the lpfc_els_free_iocb() is invoked to release the
* IOCB data structure.
* the completion of the LOGO process. If the node has transitioned to NPR,
* this routine unregisters the RPI if it is still registered. The
* lpfc_els_free_iocb() is invoked to release the IOCB data structure.
**/
static void
lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
@@ -5253,19 +5248,9 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
goto out;

/* NPort Recovery mode or node is just allocated */
if (!lpfc_nlp_not_used(ndlp)) {
/* A LOGO is completing and the node is in NPR state.
* Just unregister the RPI because the node is still
* required.
*/
if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
lpfc_unreg_rpi(vport, ndlp);
} else {
/* Indicate the node has already released, should
* not reference to it from within lpfc_els_free_iocb.
*/
cmdiocb->ndlp = NULL;
}

}
out:
/*
@@ -5285,9 +5270,8 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* RPI (Remote Port Index) mailbox command to the @phba. It simply releases
* the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
* decrements the ndlp reference count held for this completion callback
* function. After that, it invokes the lpfc_nlp_not_used() to check
* whether there is only one reference left on the ndlp. If so, it will
* perform one more decrement and trigger the release of the ndlp.
* function. After that, it invokes the lpfc_drop_node to check
* whether it is appropriate to release the node.
**/
void
lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
@@ -4333,13 +4333,14 @@ out:

/* If the node is not registered with the scsi or nvme
* transport, remove the fabric node. The failed reg_login
* is terminal.
* is terminal and forces the removal of the last node
* reference.
*/
if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
spin_unlock_irq(&ndlp->lock);
lpfc_nlp_not_used(ndlp);
lpfc_nlp_put(ndlp);
}

if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
@@ -6704,25 +6705,6 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}

/* This routine free's the specified nodelist if it is not in use
* by any other discovery thread. This routine returns 1 if the
* ndlp has been freed. A return value of 0 indicates the ndlp is
* not yet been released.
*/
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
"node not used: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));

if (kref_read(&ndlp->kref) == 1)
if (lpfc_nlp_put(ndlp))
return 1;
return 0;
}

/**
* lpfc_fcf_inuse - Check if FCF can be unregistered.
* @phba: Pointer to hba context object.
@@ -536,9 +536,9 @@ struct sli4_wcqe_xri_aborted {
/* completion queue entry structure for rqe completion */
struct lpfc_rcqe {
uint32_t word0;
#define lpfc_rcqe_bindex_SHIFT 16
#define lpfc_rcqe_bindex_MASK 0x0000FFF
#define lpfc_rcqe_bindex_WORD word0
#define lpfc_rcqe_iv_SHIFT 31
#define lpfc_rcqe_iv_MASK 0x00000001
#define lpfc_rcqe_iv_WORD word0
#define lpfc_rcqe_status_SHIFT 8
#define lpfc_rcqe_status_MASK 0x000000FF
#define lpfc_rcqe_status_WORD word0
@@ -546,6 +546,7 @@ struct lpfc_rcqe {
#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
#define FC_STATUS_RQ_DMA_FAILURE 0x14 /* DMA failure */
uint32_t word1;
#define lpfc_rcqe_fcf_id_v1_SHIFT 0
#define lpfc_rcqe_fcf_id_v1_MASK 0x0000003F
@@ -4813,8 +4814,8 @@ struct cmf_sync_wqe {
#define cmf_sync_cqid_WORD word11
uint32_t read_bytes;
uint32_t word13;
#define cmf_sync_period_SHIFT 16
#define cmf_sync_period_MASK 0x0000ffff
#define cmf_sync_period_SHIFT 24
#define cmf_sync_period_MASK 0x000000ff
#define cmf_sync_period_WORD word13
uint32_t word14;
uint32_t word15;
@@ -1279,7 +1279,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
/*
* lpfc_idle_stat_delay_work - idle_stat tracking
*
* This routine tracks per-cq idle_stat and determines polling decisions.
* This routine tracks per-eq idle_stat and determines polling decisions.
*
* Return codes:
* None
@@ -1290,7 +1290,7 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
struct lpfc_hba *phba = container_of(to_delayed_work(work),
struct lpfc_hba,
idle_stat_delay_work);
struct lpfc_queue *cq;
struct lpfc_queue *eq;
struct lpfc_sli4_hdw_queue *hdwq;
struct lpfc_idle_stat *idle_stat;
u32 i, idle_percent;
@@ -1306,10 +1306,10 @@ lpfc_idle_stat_delay_work(struct work_struct *work)

for_each_present_cpu(i) {
hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
cq = hdwq->io_cq;
eq = hdwq->hba_eq;

/* Skip if we've already handled this cq's primary CPU */
if (cq->chann != i)
/* Skip if we've already handled this eq's primary CPU */
if (eq->chann != i)
continue;

idle_stat = &phba->sli4_hba.idle_stat[i];
@@ -1333,9 +1333,9 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
idle_percent = 100 - idle_percent;

if (idle_percent < 15)
cq->poll_mode = LPFC_QUEUE_WORK;
eq->poll_mode = LPFC_QUEUE_WORK;
else
cq->poll_mode = LPFC_IRQ_POLL;
eq->poll_mode = LPFC_THREADED_IRQ;

idle_stat->prev_idle = wall_idle;
idle_stat->prev_wall = wall;
@@ -4357,6 +4357,7 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
struct lpfc_sli4_hdw_queue *qp;
struct lpfc_io_buf *lpfc_cmd;
int idx, cnt;
unsigned long iflags;

qp = phba->sli4_hba.hdwq;
cnt = 0;
@@ -4371,12 +4372,13 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
lpfc_cmd->hdwq_no = idx;
lpfc_cmd->hdwq = qp;
lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
spin_lock(&qp->io_buf_list_put_lock);
spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags);
list_add_tail(&lpfc_cmd->list,
&qp->lpfc_io_buf_list_put);
qp->put_io_bufs++;
qp->total_io_bufs++;
spin_unlock(&qp->io_buf_list_put_lock);
spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
iflags);
}
}
return cnt;
@@ -13117,8 +13119,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
}
eqhdl->irq = rc;

rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0,
name, eqhdl);
rc = request_threaded_irq(eqhdl->irq,
&lpfc_sli4_hba_intr_handler,
&lpfc_sli4_hba_intr_handler_th,
IRQF_ONESHOT, name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
@@ -1893,13 +1893,30 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
pnvme_rport->port_id,
pnvme_fcreq);

lpfc_nbuf = freqpriv->nvme_buf;
if (!lpfc_nbuf) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6140 NVME IO req has no matching lpfc nvme "
"io buffer. Skipping abort req.\n");
return;
} else if (!lpfc_nbuf->nvmeCmd) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6141 lpfc NVME IO req has no nvme_fcreq "
"io buffer. Skipping abort req.\n");
return;
}

/* Guard against IO completion being called at same time */
spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags);

/* If the hba is getting reset, this flag is set. It is
* cleared when the reset is complete and rings reestablished.
*/
spin_lock_irqsave(&phba->hbalock, flags);
spin_lock(&phba->hbalock);
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6139 Driver in reset cleanup - flushing "
"NVME Req now. hba_flag x%x\n",
@@ -1907,25 +1924,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
}

lpfc_nbuf = freqpriv->nvme_buf;
if (!lpfc_nbuf) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6140 NVME IO req has no matching lpfc nvme "
"io buffer. Skipping abort req.\n");
return;
} else if (!lpfc_nbuf->nvmeCmd) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6141 lpfc NVME IO req has no nvme_fcreq "
"io buffer. Skipping abort req.\n");
return;
}
nvmereq_wqe = &lpfc_nbuf->cur_iocbq;

/* Guard against IO completion being called at same time */
spin_lock(&lpfc_nbuf->buf_lock);

/*
* The lpfc_nbuf and the mapped nvme_fcreq in the driver's
* state must match the nvme_fcreq passed by the nvme
@@ -1971,8 +1971,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
lpfc_nvme_abort_fcreq_cmpl);

spin_unlock(&lpfc_nbuf->buf_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);

/* Make sure HBA is alive */
lpfc_issue_hb_tmo(phba);
@@ -1998,8 +1998,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;

out_unlock:
spin_unlock(&lpfc_nbuf->buf_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
spin_unlock(&phba->hbalock);
spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
return;
}
@@ -4273,7 +4273,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
"x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
cmd->device->id, cmd->device->lun, cmd,
cmd->result, *lp, *(lp + 3),
(u64)scsi_get_lba(cmd),
(cmd->device->sector_size) ?
(u64)scsi_get_lba(cmd) : 0,
cmd->retries, scsi_get_resid(cmd));
}
@@ -82,7 +82,8 @@ static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
struct lpfc_queue *eq,
struct lpfc_eqe *eqe);
struct lpfc_eqe *eqe,
enum lpfc_poll_mode poll_mode);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
@@ -629,7 +630,7 @@ lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
uint8_t rearm)
u8 rearm, enum lpfc_poll_mode poll_mode)
{
struct lpfc_eqe *eqe;
int count = 0, consumed = 0;
@@ -639,7 +640,7 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,

eqe = lpfc_sli4_eq_get(eq);
while (eqe) {
lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode);
__lpfc_sli4_consume_eqe(phba, eq, eqe);

consumed++;
@@ -1931,7 +1932,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
unsigned long iflags;
u32 ret_val;
u32 atot, wtot, max;
u16 warn_sync_period = 0;
u8 warn_sync_period = 0;

/* First address any alarm / warning activity */
atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
@@ -7957,7 +7958,7 @@ out_rdf:
* lpfc_init_idle_stat_hb - Initialize idle_stat tracking
* @phba: pointer to lpfc hba data structure.
*
* This routine initializes the per-cq idle_stat to dynamically dictate
* This routine initializes the per-eq idle_stat to dynamically dictate
* polling decisions.
*
* Return codes:
@@ -7967,16 +7968,16 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
{
int i;
struct lpfc_sli4_hdw_queue *hdwq;
struct lpfc_queue *cq;
struct lpfc_queue *eq;
struct lpfc_idle_stat *idle_stat;
u64 wall;

for_each_present_cpu(i) {
hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
cq = hdwq->io_cq;
eq = hdwq->hba_eq;

/* Skip if we've already handled this cq's primary CPU */
if (cq->chann != i)
/* Skip if we've already handled this eq's primary CPU */
if (eq->chann != i)
continue;

idle_stat = &phba->sli4_hba.idle_stat[i];
@@ -7985,13 +7986,14 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
idle_stat->prev_wall = wall;

if (phba->nvmet_support ||
phba->cmf_active_mode != LPFC_CFG_OFF)
cq->poll_mode = LPFC_QUEUE_WORK;
phba->cmf_active_mode != LPFC_CFG_OFF ||
phba->intr_type != MSIX)
eq->poll_mode = LPFC_QUEUE_WORK;
else
cq->poll_mode = LPFC_IRQ_POLL;
eq->poll_mode = LPFC_THREADED_IRQ;
}

if (!phba->nvmet_support)
if (!phba->nvmet_support && phba->intr_type == MSIX)
schedule_delayed_work(&phba->idle_stat_delay_work,
msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
@@ -9218,7 +9220,8 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)

if (mbox_pending)
/* process and rearm the EQ */
lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
LPFC_QUEUE_WORK);
else
/* Always clear and re-arm the EQ */
sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
@@ -11254,7 +11257,8 @@ inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
* will be handled through a sched from polling timer
* function which is currently triggered every 1msec.
*/
lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM,
LPFC_QUEUE_WORK);
}

/**
@@ -14682,6 +14686,38 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
case FC_STATUS_RQ_DMA_FAILURE:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2564 RQE DMA Error x%x, x%08x x%08x x%08x "
"x%08x\n",
status, rcqe->word0, rcqe->word1,
rcqe->word2, rcqe->word3);

/* If IV set, no further recovery */
if (bf_get(lpfc_rcqe_iv, rcqe))
break;

/* recycle consumed resource */
spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_sli4_rq_release(hrq, drq);
dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
if (!dma_buf) {
hrq->RQ_no_buf_found++;
spin_unlock_irqrestore(&phba->hbalock, iflags);
break;
}
hrq->RQ_rcv_buf++;
hrq->RQ_buf_posted--;
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_in_buf_free(phba, &dma_buf->dbuf);
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2565 Unexpected RQE Status x%x, w0-3 x%08x "
"x%08x x%08x x%08x\n",
status, rcqe->word0, rcqe->word1,
rcqe->word2, rcqe->word3);
break;
}
out:
return workposted;
@@ -14803,7 +14839,6 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
* @cq: Pointer to CQ to be processed
* @handler: Routine to process each cqe
* @delay: Pointer to usdelay to set in case of rescheduling of the handler
* @poll_mode: Polling mode we were called from
*
* This routine processes completion queue entries in a CQ. While a valid
* queue element is found, the handler is called. During processing checks
@@ -14821,8 +14856,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
static bool
__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_cqe *), unsigned long *delay,
enum lpfc_poll_mode poll_mode)
struct lpfc_cqe *), unsigned long *delay)
{
struct lpfc_cqe *cqe;
bool workposted = false;
@@ -14863,10 +14897,6 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
arm = false;
}

/* Note: complete the irq_poll softirq before rearming CQ */
if (poll_mode == LPFC_IRQ_POLL)
irq_poll_complete(&cq->iop);

/* Track the max number of CQEs processed in 1 EQ */
if (count > cq->CQ_max_cqe)
cq->CQ_max_cqe = count;
@@ -14916,17 +14946,17 @@ __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
case LPFC_MCQ:
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_sp_handle_mcqe,
&delay, LPFC_QUEUE_WORK);
&delay);
break;
case LPFC_WCQ:
if (cq->subtype == LPFC_IO)
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_fp_handle_cqe,
&delay, LPFC_QUEUE_WORK);
&delay);
else
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_sp_handle_cqe,
&delay, LPFC_QUEUE_WORK);
&delay);
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -15203,6 +15233,38 @@ drop:
hrq->RQ_no_posted_buf++;
/* Post more buffers if possible */
break;
case FC_STATUS_RQ_DMA_FAILURE:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2575 RQE DMA Error x%x, x%08x x%08x x%08x "
"x%08x\n",
status, rcqe->word0, rcqe->word1,
rcqe->word2, rcqe->word3);

/* If IV set, no further recovery */
if (bf_get(lpfc_rcqe_iv, rcqe))
break;

/* recycle consumed resource */
spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_sli4_rq_release(hrq, drq);
dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
if (!dma_buf) {
hrq->RQ_no_buf_found++;
spin_unlock_irqrestore(&phba->hbalock, iflags);
break;
}
hrq->RQ_rcv_buf++;
hrq->RQ_buf_posted--;
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_rq_buf_free(phba, &dma_buf->hbuf);
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2576 Unexpected RQE Status x%x, w0-3 x%08x "
"x%08x x%08x x%08x\n",
status, rcqe->word0, rcqe->word1,
rcqe->word2, rcqe->word3);
break;
}
out:
return workposted;
@@ -15271,45 +15333,64 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
}

/**
* lpfc_sli4_sched_cq_work - Schedules cq work
* @phba: Pointer to HBA context object.
* @cq: Pointer to CQ
* @cqid: CQ ID
* __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
* @cq: Pointer to CQ to be processed
*
* This routine checks the poll mode of the CQ corresponding to
* cq->chann, then either schedules a softirq or queue_work to complete
* cq work.
*
* queue_work path is taken if in NVMET mode, or if poll_mode is in
* LPFC_QUEUE_WORK mode. Otherwise, softirq path is taken.
* This routine calls the cq processing routine with the handler for
* fast path CQEs.
*
* The CQ routine returns two values: the first is the calling status,
* which indicates whether work was queued to the background discovery
* thread. If true, the routine should wakeup the discovery thread;
* the second is the delay parameter. If non-zero, rather than rearming
* the CQ and yet another interrupt, the CQ handler should be queued so
* that it is processed in a subsequent polling action. The value of
* the delay indicates when to reschedule it.
**/
static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
struct lpfc_queue *cq, uint16_t cqid)
static void
__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
{
int ret = 0;
struct lpfc_hba *phba = cq->phba;
unsigned long delay;
bool workposted = false;
int ret;

switch (cq->poll_mode) {
case LPFC_IRQ_POLL:
/* CGN mgmt is mutually exclusive from softirq processing */
if (phba->cmf_active_mode == LPFC_CFG_OFF) {
irq_poll_sched(&cq->iop);
break;
}
fallthrough;
case LPFC_QUEUE_WORK:
default:
/* process and rearm the CQ */
workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
&delay);

if (delay) {
if (is_kdump_kernel())
ret = queue_work(phba->wq, &cq->irqwork);
ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
delay);
else
ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
ret = queue_delayed_work_on(cq->chann, phba->wq,
&cq->sched_irqwork, delay);
if (!ret)
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0383 Cannot schedule queue work "
"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
cqid, cq->queue_id,
raw_smp_processor_id());
"0367 Cannot schedule queue work "
"for cqid=%d on CPU %d\n",
cq->queue_id, cq->chann);
}

/* wake up worker thread if there are works to be done */
if (workposted)
lpfc_worker_wake_up(phba);
}

/**
* lpfc_sli4_hba_process_cq - fast-path work handler when started by
* interrupt
* @work: pointer to work element
*
* translates from the work handler and calls the fast-path handler.
**/
static void
lpfc_sli4_hba_process_cq(struct work_struct *work)
{
struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);

__lpfc_sli4_hba_process_cq(cq);
}

/**
@@ -15317,6 +15398,7 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
* @phba: Pointer to HBA context object.
* @eq: Pointer to the queue structure.
* @eqe: Pointer to fast-path event queue entry.
* @poll_mode: poll_mode to execute processing the cq.
*
* This routine process a event queue entry from the fast-path event queue.
* It will check the MajorCode and MinorCode to determine this is for a
@@ -15327,11 +15409,12 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
**/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
struct lpfc_eqe *eqe)
struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode)
{
struct lpfc_queue *cq = NULL;
uint32_t qidx = eq->hdwq;
uint16_t cqid, id;
int ret;

if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -15391,70 +15474,25 @@ work_cq:
else
cq->isr_timestamp = 0;
#endif
lpfc_sli4_sched_cq_work(phba, cq, cqid);
}

/**
* __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
* @cq: Pointer to CQ to be processed
* @poll_mode: Enum lpfc_poll_state to determine poll mode
*
* This routine calls the cq processing routine with the handler for
* fast path CQEs.
*
* The CQ routine returns two values: the first is the calling status,
* which indicates whether work was queued to the background discovery
* thread. If true, the routine should wakeup the discovery thread;
* the second is the delay parameter. If non-zero, rather than rearming
* the CQ and yet another interrupt, the CQ handler should be queued so
* that it is processed in a subsequent polling action. The value of
* the delay indicates when to reschedule it.
**/
static void
__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
enum lpfc_poll_mode poll_mode)
{
struct lpfc_hba *phba = cq->phba;
unsigned long delay;
bool workposted = false;
int ret = 0;

/* process and rearm the CQ */
workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
&delay, poll_mode);

if (delay) {
switch (poll_mode) {
case LPFC_THREADED_IRQ:
__lpfc_sli4_hba_process_cq(cq);
break;
case LPFC_QUEUE_WORK:
default:
if (is_kdump_kernel())
ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
delay);
ret = queue_work(phba->wq, &cq->irqwork);
else
ret = queue_delayed_work_on(cq->chann, phba->wq,
&cq->sched_irqwork, delay);
ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
if (!ret)
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0367 Cannot schedule queue work "
"for cqid=%d on CPU %d\n",
cq->queue_id, cq->chann);
"0383 Cannot schedule queue work "
"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
cqid, cq->queue_id,
raw_smp_processor_id());
break;
}

/* wake up worker thread if there are works to be done */
if (workposted)
lpfc_worker_wake_up(phba);
}

/**
* lpfc_sli4_hba_process_cq - fast-path work handler when started by
* interrupt
* @work: pointer to work element
*
* translates from the work handler and calls the fast-path handler.
**/
static void
lpfc_sli4_hba_process_cq(struct work_struct *work)
{
struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);

__lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
}

/**
@@ -15469,7 +15507,7 @@ lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
struct lpfc_queue *cq = container_of(to_delayed_work(work),
struct lpfc_queue, sched_irqwork);

__lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
__lpfc_sli4_hba_process_cq(cq);
}

/**
@@ -15495,8 +15533,9 @@ lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
* and returns for these events. This function is called without any lock
* held. It gets the hbalock to access and update SLI data structures.
*
* This function returns IRQ_HANDLED when interrupt is handled else it
* returns IRQ_NONE.
* This function returns IRQ_HANDLED when interrupt is handled, IRQ_WAKE_THREAD
* when interrupt is scheduled to be handled from a threaded irq context, or
* else returns IRQ_NONE.
**/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
@@ -15505,8 +15544,8 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
struct lpfc_hba_eq_hdl *hba_eq_hdl;
struct lpfc_queue *fpeq;
unsigned long iflag;
int ecount = 0;
int hba_eqidx;
int ecount = 0;
struct lpfc_eq_intr_info *eqi;

/* Get the driver's phba structure from the dev_id */
@@ -15535,30 +15574,41 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
}

eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
eqi->icnt++;
switch (fpeq->poll_mode) {
case LPFC_THREADED_IRQ:
/* CGN mgmt is mutually exclusive from irq processing */
if (phba->cmf_active_mode == LPFC_CFG_OFF)
return IRQ_WAKE_THREAD;
fallthrough;
case LPFC_QUEUE_WORK:
default:
eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
eqi->icnt++;

fpeq->last_cpu = raw_smp_processor_id();
fpeq->last_cpu = raw_smp_processor_id();

if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
fpeq->q_flag & HBA_EQ_DELAY_CHK &&
phba->cfg_auto_imax &&
fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
fpeq->q_flag & HBA_EQ_DELAY_CHK &&
phba->cfg_auto_imax &&
fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
lpfc_sli4_mod_hba_eq_delay(phba, fpeq,
LPFC_MAX_AUTO_EQ_DELAY);

/* process and rearm the EQ */
ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
/* process and rearm the EQ */
ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
LPFC_QUEUE_WORK);

if (unlikely(ecount == 0)) {
fpeq->EQ_no_entry++;
if (phba->intr_type == MSIX)
/* MSI-X treated interrupt served as no EQ share INT */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0358 MSI-X interrupt with no EQE\n");
else
/* Non MSI-X treated on interrupt as EQ share INT */
return IRQ_NONE;
if (unlikely(ecount == 0)) {
fpeq->EQ_no_entry++;
if (phba->intr_type == MSIX)
/* MSI-X treated interrupt served as no EQ share INT */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0358 MSI-X interrupt with no EQE\n");
else
/* Non MSI-X treated on interrupt as EQ share INT */
return IRQ_NONE;
}
}

return IRQ_HANDLED;
@@ -16115,13 +16165,69 @@ out:
return status;
}

static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
/**
* lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler
* @irq: Interrupt number.
* @dev_id: The device context pointer.
*
* This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within
* threaded irq context.
*
* Returns
* IRQ_HANDLED - interrupt is handled
* IRQ_NONE - otherwise
**/
irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id)
{
struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
struct lpfc_hba *phba;
struct lpfc_hba_eq_hdl *hba_eq_hdl;
struct lpfc_queue *fpeq;
int ecount = 0;
int hba_eqidx;
struct lpfc_eq_intr_info *eqi;

__lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
/* Get the driver's phba structure from the dev_id */
hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
phba = hba_eq_hdl->phba;
hba_eqidx = hba_eq_hdl->idx;

return 1;
if (unlikely(!phba))
return IRQ_NONE;
if (unlikely(!phba->sli4_hba.hdwq))
return IRQ_NONE;

/* Get to the EQ struct associated with this vector */
fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
if (unlikely(!fpeq))
return IRQ_NONE;

eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id());
eqi->icnt++;

fpeq->last_cpu = raw_smp_processor_id();

if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
fpeq->q_flag & HBA_EQ_DELAY_CHK &&
phba->cfg_auto_imax &&
fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);

/* process and rearm the EQ */
ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
LPFC_THREADED_IRQ);

if (unlikely(ecount == 0)) {
fpeq->EQ_no_entry++;
if (phba->intr_type == MSIX)
/* MSI-X treated interrupt served as no EQ share INT */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"3358 MSI-X interrupt with no EQE\n");
else
/* Non MSI-X treated on interrupt as EQ share INT */
return IRQ_NONE;
}
return IRQ_HANDLED;
}

/**
@@ -16265,8 +16371,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,

if (cq->queue_id > phba->sli4_hba.cq_max)
phba->sli4_hba.cq_max = cq->queue_id;

irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
out:
mempool_free(mbox, phba->mbox_mem_pool);
return status;
@@ -140,7 +140,7 @@ struct lpfc_rqb {

enum lpfc_poll_mode {
LPFC_QUEUE_WORK,
LPFC_IRQ_POLL
LPFC_THREADED_IRQ,
};

struct lpfc_idle_stat {
@@ -279,8 +279,6 @@ struct lpfc_queue {
struct list_head _poll_list;
void **q_pgs; /* array to index entries per page */

#define LPFC_IRQ_POLL_WEIGHT 256
struct irq_poll iop;
enum lpfc_poll_mode poll_mode;
};
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/

#define LPFC_DRIVER_VERSION "14.2.0.11"
#define LPFC_DRIVER_VERSION "14.2.0.12"
#define LPFC_DRIVER_NAME "lpfc"

/* Used for SLI 2/3 */