scsi: lpfc: correct oversubscription of nvme io requests for an adapter

Under large configurations, the driver would start to log message 6065 -
NVME out of buffers (exchanges).

The driver uses the ndlp cmd_qdepth value when determining the max
outstanding IOs for an adapter. By default, this value is set to 65536,
which exceeds the maximum exchange count supported on an adapter. The ndlp
cmd_qdepth has no relevance here; the outstanding IO count should instead
be capped at the max exchange count, with IO requests beyond that level
bounced back with an EBUSY status so that the block layer retries them.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Author:    James Smart <james.smart@broadcom.com>  2018-05-24 21:08:58 -07:00
Committer: Martin K. Petersen <martin.petersen@oracle.com>
commit 4d5e789a2e (parent dc19e3b4a8)
3 changed files with 32 additions and 4 deletions
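
The fix boils down to a simple admission-control pattern: count pending IOs
per node, cap the count at the adapter's exchange (XRI) limit instead of the
65536 default, and bounce anything over the cap with -EBUSY so the block
layer requeues and retries it. Below is a minimal, self-contained C sketch
of that pattern only; struct node, submit_io, and the other names are
illustrative stand-ins, not the lpfc symbols.

/* Minimal sketch of the capping pattern (illustrative names, not lpfc code). */
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

struct node {
	atomic_int cmd_pending;	/* IOs currently outstanding on this node */
	int cmd_qdepth;		/* capped at the adapter's max exchange count */
};

static int submit_io(struct node *n)
{
	/* Reserve a slot; back out and bounce if the exchange cap is hit. */
	if (atomic_fetch_add(&n->cmd_pending, 1) >= n->cmd_qdepth) {
		atomic_fetch_sub(&n->cmd_pending, 1);
		return -EBUSY;	/* the block layer will retry the request */
	}
	/* ... build and post the command here ... */
	return 0;
}

static void complete_io(struct node *n)
{
	atomic_fetch_sub(&n->cmd_pending, 1);	/* completion frees a slot */
}

int main(void)
{
	struct node n = { .cmd_qdepth = 2 };	/* tiny cap for demonstration */

	atomic_init(&n.cmd_pending, 0);
	for (int i = 0; i < 3; i++)
		printf("submit %d -> %d\n", i, submit_io(&n));
	complete_io(&n);
	printf("after one completion -> %d\n", submit_io(&n));
	return 0;
}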

drivers/scsi/lpfc/lpfc_attr.c

@@ -297,6 +297,13 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 	len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
 
 	spin_lock_irq(shost->host_lock);
+	len += snprintf(buf + len, PAGE_SIZE - len,
+			"XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
+			phba->brd_no,
+			phba->sli4_hba.max_cfg_param.max_xri,
+			phba->sli4_hba.nvme_xri_max,
+			phba->sli4_hba.scsi_xri_max,
+			lpfc_sli4_get_els_iocb_cnt(phba));
 
 	/* Port state is only one of two values for now. */
 	if (localport->port_id)

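For context on the lpfc_attr.c hunk above: it extends the nvme_info show
routine with the standard snprintf accumulation idiom, where each append
advances the write offset and shrinks the remaining space so the output can
never overrun the page-sized buffer. A small userspace sketch of the same
idiom; the buffer size and the XRI counts printed are illustrative:

#include <stdio.h>

#define PAGE_SIZE 4096	/* stand-in for the kernel's page-sized sysfs buffer */

int main(void)
{
	char buf[PAGE_SIZE];
	int len = 0;

	/* Each call appends at buf + len and is bounded by the space left,
	 * mirroring len += snprintf(buf + len, PAGE_SIZE - len, ...). */
	len += snprintf(buf + len, sizeof(buf) - len,
			"NVME Initiator Enabled\n");
	len += snprintf(buf + len, sizeof(buf) - len,
			"XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
			0, 6144, 2947, 2977, 220);	/* illustrative counts */

	fputs(buf, stdout);
	return 0;
}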
drivers/scsi/lpfc/lpfc_nportdisc.c

@@ -1982,6 +1982,12 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		if (bf_get_be32(prli_disc, nvpr))
 			ndlp->nlp_type |= NLP_NVME_DISCOVERY;
 
+		/* This node is an NVME target. Adjust the command
+		 * queue depth on this node to not exceed the available
+		 * xris.
+		 */
+		ndlp->cmd_qdepth = phba->sli4_hba.nvme_xri_max;
+
 		/*
 		 * If prli_fba is set, the Target supports FirstBurst.
 		 * If prli_fb_sz is 0, the FirstBurst size is unlimited,

drivers/scsi/lpfc/lpfc_nvme.c

@@ -973,9 +973,22 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 	/* Sanity check on return of outstanding command */
 	if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) {
+		if (!lpfc_ncmd) {
+			lpfc_printf_vlog(vport, KERN_ERR,
+					 LOG_NODE | LOG_NVME_IOERR,
+					 "6071 Null lpfc_ncmd pointer. No "
+					 "release, skip completion\n");
+			return;
+		}
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
-				 "6071 Completion pointers bad on wqe %p.\n",
-				 wcqe);
+				 "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
+				 "nvmeCmd %p nrport %p\n",
+				 lpfc_ncmd, lpfc_ncmd->nvmeCmd,
+				 lpfc_ncmd->nrport);
+
+		/* Release the lpfc_ncmd regardless of the missing elements. */
+		lpfc_release_nvme_buf(phba, lpfc_ncmd);
 		return;
 	}
 	nCmd = lpfc_ncmd->nvmeCmd;
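
The sanity-check rework above also closes the leak that fed message 6065:
the old path logged 6071 and returned without releasing the buffer,
permanently consuming an exchange on every bad completion. The new rule is
that only a null command handle skips the release; missing members are
logged and the buffer is released anyway. A minimal sketch of that rule,
with free() standing in for lpfc_release_nvme_buf and every name
illustrative:

#include <stdio.h>
#include <stdlib.h>

struct cmd {
	void *payload;	/* may be NULL on a corrupted completion */
};

static void release_buf(struct cmd *c)
{
	free(c);	/* returns the buffer (exchange) to the pool */
}

static void complete(struct cmd *c)
{
	if (!c) {
		fprintf(stderr, "null handle: nothing to release\n");
		return;	/* no buffer exists, so nothing can leak */
	}
	if (!c->payload) {
		fprintf(stderr, "missing members: release anyway\n");
		release_buf(c);	/* the fix: do not leak the buffer */
		return;
	}
	/* ... normal completion handling ... */
	release_buf(c);
}

int main(void)
{
	complete(NULL);				/* null-handle path */
	complete(calloc(1, sizeof(struct cmd)));	/* missing-member path */
	return 0;
}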
@@ -1537,8 +1550,10 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	    !expedite) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
 				 "6174 Fail IO, ndlp qdepth exceeded: "
-				 "idx %d DID %x\n",
-				 lpfc_queue_info->index, ndlp->nlp_DID);
+				 "idx %d DID %x pend %d qdepth %d\n",
+				 lpfc_queue_info->index, ndlp->nlp_DID,
+				 atomic_read(&ndlp->cmd_pending),
+				 ndlp->cmd_qdepth);
 		atomic_inc(&lport->xmt_fcp_qdepth);
 		ret = -EBUSY;
 		goto out_fail;