[SCSI] lpfc 8.3.2 : Addition of SLI4 Interface - FCOE Discovery support
SLI4 supports both FC and FCOE, with some extended topology objects. This patch adds support for the objects, and updates the discovery engines for their use.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
parent 04c6849684
commit 6fb120a7ed
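For readers skimming the diff below, here is a minimal, userspace-only sketch of the bookkeeping strategy the new lpfc_sli4_alloc_rpi()/lpfc_sli4_free_rpi() routines in this patch use for registration indices (RPIs): a bitmap scanned from a non-zero base, with a low-water mark that would trigger growing the backing region. All names in the sketch (rpi_pool, rpi_alloc, the water-mark constant) are hypothetical and are not driver code; the real routines additionally post RPI header pages to the port via mailbox commands.

/* Hypothetical userspace sketch of a bitmap index allocator with a
 * low-water mark, loosely mirroring lpfc_sli4_alloc_rpi()/lpfc_sli4_free_rpi()
 * from this patch.  No kernel APIs are used. */
#include <stdio.h>
#include <stdbool.h>

#define RPI_BASE        8       /* valid range need not start at zero */
#define RPI_LIMIT       64
#define LOW_WATER_MARK  4
#define ALLOC_ERROR     (-1)

struct rpi_pool {
    unsigned long bmask[(RPI_LIMIT + 63) / 64];
    int count;                  /* indices currently in use */
};

static bool test_bit_(const unsigned long *b, int n)
{
    return b[n / 64] & (1UL << (n % 64));
}

static int rpi_alloc(struct rpi_pool *p)
{
    int rpi;

    /* Search from the base, like find_next_zero_bit() in the driver. */
    for (rpi = RPI_BASE; rpi < RPI_LIMIT; rpi++) {
        if (!test_bit_(p->bmask, rpi)) {
            p->bmask[rpi / 64] |= 1UL << (rpi % 64);
            p->count++;
            if (RPI_LIMIT - RPI_BASE - p->count < LOW_WATER_MARK)
                printf("low on indices: would grow the region here\n");
            return rpi;
        }
    }
    return ALLOC_ERROR;
}

static void rpi_free(struct rpi_pool *p, int rpi)
{
    p->bmask[rpi / 64] &= ~(1UL << (rpi % 64));
    p->count--;
}

int main(void)
{
    struct rpi_pool pool = { {0}, 0 };
    int a = rpi_alloc(&pool);
    int b = rpi_alloc(&pool);

    printf("allocated %d and %d\n", a, b);
    rpi_free(&pool, a);
    printf("after free, next alloc reuses %d\n", rpi_alloc(&pool));
    return 0;
}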
@@ -2924,6 +2924,14 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
 */
 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 
+/*
+# lpfc_enable_fip: When set, FIP is required to start discovery. If not
+# set, the driver will add an FCF record manually if the port has no
+# FCF records available and start discovery.
+# Value range is [0,1]. Default value is 1 (enabled)
+*/
+LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
+
 
 /*
 # lpfc_prot_mask: i
@@ -2990,6 +2998,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
     &dev_attr_lpfc_peer_port_login,
     &dev_attr_lpfc_nodev_tmo,
     &dev_attr_lpfc_devloss_tmo,
+    &dev_attr_lpfc_enable_fip,
     &dev_attr_lpfc_fcp_class,
     &dev_attr_lpfc_use_adisc,
     &dev_attr_lpfc_ack0,
@@ -3042,6 +3051,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
     &dev_attr_lpfc_lun_queue_depth,
     &dev_attr_lpfc_nodev_tmo,
     &dev_attr_lpfc_devloss_tmo,
+    &dev_attr_lpfc_enable_fip,
     &dev_attr_lpfc_hba_queue_depth,
     &dev_attr_lpfc_peer_port_login,
     &dev_attr_lpfc_restrict_login,
@@ -4167,26 +4177,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
     phba->cfg_soft_wwpn = 0L;
     lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
     lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
-    /*
-     * Since the sg_tablesize is module parameter, the sg_dma_buf_size
-     * used to create the sg_dma_buf_pool must be dynamically calculated.
-     * 2 segments are added since the IOCB needs a command and response bde.
-     */
-    phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
-            sizeof(struct fcp_rsp) +
-            ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
-
-    if (phba->cfg_enable_bg) {
-        phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
-        phba->cfg_sg_dma_buf_size +=
-            phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
-    }
-
-    /* Also reinitialize the host templates with new values. */
-    lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-    lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
 
     lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
+    lpfc_enable_fip_init(phba, lpfc_enable_fip);
+    lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
 
     return;
 }
@@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
 struct fc_rport;
 void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
 void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
@@ -108,6 +110,7 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
 int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
 int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
 int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
 int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -387,6 +387,75 @@ fail:
     return -ENXIO;
 }
 
+/**
+ * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
+ * the @vport. This mailbox command is necessary for FCoE only.
+ *
+ * Return code
+ *   0 - successfully issued REG_VFI for @vport
+ *   A failure code otherwise.
+ **/
+static int
+lpfc_issue_reg_vfi(struct lpfc_vport *vport)
+{
+    struct lpfc_hba *phba = vport->phba;
+    LPFC_MBOXQ_t *mboxq;
+    struct lpfc_nodelist *ndlp;
+    struct serv_parm *sp;
+    struct lpfc_dmabuf *dmabuf;
+    int rc = 0;
+
+    sp = &phba->fc_fabparam;
+    ndlp = lpfc_findnode_did(vport, Fabric_DID);
+    if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+        rc = -ENODEV;
+        goto fail;
+    }
+
+    dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+    if (!dmabuf) {
+        rc = -ENOMEM;
+        goto fail;
+    }
+    dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
+    if (!dmabuf->virt) {
+        rc = -ENOMEM;
+        goto fail_free_dmabuf;
+    }
+    mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+    if (!mboxq) {
+        rc = -ENOMEM;
+        goto fail_free_coherent;
+    }
+    vport->port_state = LPFC_FABRIC_CFG_LINK;
+    memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
+    lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+    mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
+    mboxq->vport = vport;
+    mboxq->context1 = dmabuf;
+    rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+    if (rc == MBX_NOT_FINISHED) {
+        rc = -ENXIO;
+        goto fail_free_mbox;
+    }
+    return 0;
+
+fail_free_mbox:
+    mempool_free(mboxq, phba->mbox_mem_pool);
+fail_free_coherent:
+    lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+fail_free_dmabuf:
+    kfree(dmabuf);
+fail:
+    lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+    lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+        "0289 Issue Register VFI failed: Err %d\n", rc);
+    return rc;
+}
+
 /**
  * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
  * @vport: pointer to a host virtual N_Port data structure.
@@ -499,17 +568,24 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
         }
     }
-
-    lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
-
-    if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
-        vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) {
-        lpfc_register_new_vport(phba, vport, ndlp);
-        return 0;
+    if (phba->sli_rev < LPFC_SLI_REV4) {
+        lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
+            vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+            lpfc_register_new_vport(phba, vport, ndlp);
+        else
+            lpfc_issue_fabric_reglogin(vport);
+    } else {
+        ndlp->nlp_type |= NLP_FABRIC;
+        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+        if (vport->vfi_state & LPFC_VFI_REGISTERED) {
+            lpfc_start_fdiscs(phba);
+            lpfc_do_scr_ns_plogi(phba, vport);
+        } else
+            lpfc_issue_reg_vfi(vport);
     }
-    lpfc_issue_fabric_reglogin(vport);
+
     return 0;
 }
 
 /**
  * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
  * @vport: pointer to a host virtual N_Port data structure.
@@ -817,9 +893,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
     if (sp->cmn.fcphHigh < FC_PH3)
         sp->cmn.fcphHigh = FC_PH3;
 
-    if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+    if (phba->sli_rev == LPFC_SLI_REV4) {
+        elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
+        elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
+        /* FLOGI needs to be 3 for WQE FCFI */
+        /* Set the fcfi to the fcfi we registered with */
+        elsiocb->iocb.ulpContext = phba->fcf.fcfi;
+    } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
         sp->cmn.request_multiple_Nport = 1;
-
         /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
         icmd->ulpCt_h = 1;
         icmd->ulpCt_l = 0;
@@ -932,6 +1013,8 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
         if (!ndlp)
             return 0;
         lpfc_nlp_init(vport, ndlp, Fabric_DID);
+        /* Set the node type */
+        ndlp->nlp_type |= NLP_FABRIC;
         /* Put ndlp onto node list */
         lpfc_enqueue_node(vport, ndlp);
     } else if (!NLP_CHK_NODE_ACT(ndlp)) {
@@ -1604,7 +1687,8 @@ lpfc_adisc_done(struct lpfc_vport *vport)
      * and continue discovery.
      */
     if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
-        !(vport->fc_flag & FC_RSCN_MODE)) {
+        !(vport->fc_flag & FC_RSCN_MODE) &&
+        (phba->sli_rev < LPFC_SLI_REV4)) {
         lpfc_issue_reg_vpi(phba, vport);
         return;
     }
@@ -2937,6 +3021,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
     struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
     struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
 
+    /*
+     * This routine is used to register and unregister in previous SLI
+     * modes.
+     */
+    if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
+        (phba->sli_rev == LPFC_SLI_REV4))
+        lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
+
     pmb->context1 = NULL;
     lpfc_mbuf_free(phba, mp->virt, mp->phys);
     kfree(mp);
@@ -3816,7 +3908,9 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
         payload_len -= sizeof(uint32_t);
         switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
         case RSCN_ADDRESS_FORMAT_PORT:
-            if (ns_did.un.word == rscn_did.un.word)
+            if ((ns_did.un.b.domain == rscn_did.un.b.domain)
+                && (ns_did.un.b.area == rscn_did.un.b.area)
+                && (ns_did.un.b.id == rscn_did.un.b.id))
                 goto return_did_out;
             break;
         case RSCN_ADDRESS_FORMAT_AREA:
@@ -4857,7 +4951,10 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
         } else {
             /* FAN verified - skip FLOGI */
             vport->fc_myDID = vport->fc_prevDID;
-            lpfc_issue_fabric_reglogin(vport);
+            if (phba->sli_rev < LPFC_SLI_REV4)
+                lpfc_issue_fabric_reglogin(vport);
+            else
+                lpfc_issue_reg_vfi(vport);
         }
     }
     return 0;
@@ -5540,11 +5637,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
 dropit:
     if (vport && !(vport->load_flag & FC_UNLOADING))
-        lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
-            "(%d):0111 Dropping received ELS cmd "
+        lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+            "0111 Dropping received ELS cmd "
             "Data: x%x x%x x%x\n",
-            vport->vpi, icmd->ulpStatus,
-            icmd->un.ulpWord[4], icmd->ulpTimeout);
+            icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
     phba->fc_stat.elsRcvDrop++;
 }
 
@@ -5620,10 +5716,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
         icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
         if (icmd->unsli3.rcvsli3.vpi == 0xffff)
             vport = phba->pport;
-        else {
-            uint16_t vpi = icmd->unsli3.rcvsli3.vpi;
-            vport = lpfc_find_vport_by_vpid(phba, vpi);
-        }
+        else
+            vport = lpfc_find_vport_by_vpid(phba,
+                icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
     }
     /* If there are no BDEs associated
      * with this IOCB, there is nothing to do.
@@ -5792,7 +5887,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
     } else {
         if (vport == phba->pport)
-            lpfc_issue_fabric_reglogin(vport);
+            if (phba->sli_rev < LPFC_SLI_REV4)
+                lpfc_issue_fabric_reglogin(vport);
+            else
+                lpfc_issue_reg_vfi(vport);
         else
             lpfc_do_scr_ns_plogi(phba, vport);
     }
@@ -5824,7 +5922,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
 
     mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
     if (mbox) {
-        lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
+        lpfc_reg_vpi(vport, mbox);
         mbox->vport = vport;
         mbox->context2 = lpfc_nlp_get(ndlp);
         mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
@@ -6496,3 +6594,38 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
     lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
                   IOERR_SLI_ABORTED);
 }
+
+/**
+ * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the els xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 slow-path
+ * ELS aborted xri.
+ **/
+void
+lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
+              struct sli4_wcqe_xri_aborted *axri)
+{
+    uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+    struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+    unsigned long iflag = 0;
+
+    spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+    list_for_each_entry_safe(sglq_entry, sglq_next,
+            &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
+        if (sglq_entry->sli4_xritag == xri) {
+            list_del(&sglq_entry->list);
+            spin_unlock_irqrestore(
+                    &phba->sli4_hba.abts_sgl_list_lock,
+                    iflag);
+            spin_lock_irqsave(&phba->hbalock, iflag);
+
+            list_add_tail(&sglq_entry->list,
+                    &phba->sli4_hba.lpfc_sgl_list);
+            spin_unlock_irqrestore(&phba->hbalock, iflag);
+            return;
+        }
+    }
+    spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+}
(File diff suppressed because it is too large.)
@@ -363,7 +363,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
     if (!mbox)
         goto out;
 
-    rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
+    rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
                 (uint8_t *) sp, mbox, 0);
     if (rc) {
         mempool_free(mbox, phba->mbox_mem_pool);
@@ -497,11 +497,19 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
         lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
     else
         lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
-
-    if ((!(ndlp->nlp_type & NLP_FABRIC) &&
-        ((ndlp->nlp_type & NLP_FCP_TARGET) ||
-        !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
+    if ((ndlp->nlp_type & NLP_FABRIC) &&
+        vport->port_type == LPFC_NPIV_PORT) {
+        lpfc_linkdown_port(vport);
+        mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+        spin_lock_irq(shost->host_lock);
+        ndlp->nlp_flag |= NLP_DELAY_TMO;
+        spin_unlock_irq(shost->host_lock);
+
+        ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+    } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
+        ((ndlp->nlp_type & NLP_FCP_TARGET) ||
+        !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
         (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
         /* Only try to re-login if this is NOT a Fabric Node */
         mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
         spin_lock_irq(shost->host_lock);
@@ -569,7 +577,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
     struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-    if (!ndlp->nlp_rpi) {
+    if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
         ndlp->nlp_flag &= ~NLP_NPR_ADISC;
         return 0;
     }
@@ -859,7 +867,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
 
     lpfc_unreg_rpi(vport, ndlp);
 
-    if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
+    if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
             (uint8_t *) sp, mbox, 0) == 0) {
         switch (ndlp->nlp_DID) {
         case NameServer_DID:
@@ -1070,6 +1078,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
     struct lpfc_iocbq *cmdiocb, *rspiocb;
     IOCB_t *irsp;
     ADISC *ap;
+    int rc;
 
     cmdiocb = (struct lpfc_iocbq *) arg;
     rspiocb = cmdiocb->context_un.rsp_iocb;
@@ -1095,6 +1104,15 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
         return ndlp->nlp_state;
     }
 
+    if (phba->sli_rev == LPFC_SLI_REV4) {
+        rc = lpfc_sli4_resume_rpi(ndlp);
+        if (rc) {
+            /* Stay in state and retry. */
+            ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+            return ndlp->nlp_state;
+        }
+    }
+
     if (ndlp->nlp_type & NLP_FCP_TARGET) {
         ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
         lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
@@ -1102,6 +1120,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
         ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
         lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
     }
+
     return ndlp->nlp_state;
 }
 
@@ -1285,6 +1304,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
     }
 
     ndlp->nlp_rpi = mb->un.varWords[0];
+    ndlp->nlp_flag |= NLP_RPI_VALID;
 
     /* Only if we are not a fabric nport do we issue PRLI */
     if (!(ndlp->nlp_type & NLP_FABRIC)) {
@@ -10929,3 +10929,520 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
 };
     return 0;
 }
+
+/**
+ * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * HBA consistent with the SLI-4 interface spec. This routine
+ * posts a PAGE_SIZE memory region to the port to hold up to
+ * PAGE_SIZE modulo 64 rpi context headers.
+ *
+ * This routine does not require any locks. It's usage is expected
+ * to be driver load or reset recovery when the driver is
+ * sequential.
+ *
+ * Return codes
+ *     0 - sucessful
+ *     EIO - The mailbox failed to complete successfully.
+ *     When this error occurs, the driver is not guaranteed
+ *     to have any rpi regions posted to the device and
+ *     must either attempt to repost the regions or take a
+ *     fatal error.
+ **/
+int
+lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
+{
+    struct lpfc_rpi_hdr *rpi_page;
+    uint32_t rc = 0;
+
+    /* Post all rpi memory regions to the port. */
+    list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+        rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
+        if (rc != MBX_SUCCESS) {
+            lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                    "2008 Error %d posting all rpi "
+                    "headers\n", rc);
+            rc = -EIO;
+            break;
+        }
+    }
+
+    return rc;
+}
+
+/**
+ * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ * @rpi_page: pointer to the rpi memory region.
+ *
+ * This routine is invoked to post a single rpi header to the
+ * HBA consistent with the SLI-4 interface spec. This memory region
+ * maps up to 64 rpi context regions.
+ *
+ * Return codes
+ *     0 - sucessful
+ *     ENOMEM - No available memory
+ *     EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
+{
+    LPFC_MBOXQ_t *mboxq;
+    struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
+    uint32_t rc = 0;
+    uint32_t mbox_tmo;
+    uint32_t shdr_status, shdr_add_status;
+    union lpfc_sli4_cfg_shdr *shdr;
+
+    /* The port is notified of the header region via a mailbox command. */
+    mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+    if (!mboxq) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                "2001 Unable to allocate memory for issuing "
+                "SLI_CONFIG_SPECIAL mailbox command\n");
+        return -ENOMEM;
+    }
+
+    /* Post all rpi memory regions to the port. */
+    hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
+    mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+    lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+             LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
+             sizeof(struct lpfc_mbx_post_hdr_tmpl) -
+             sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+    bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
+           hdr_tmpl, rpi_page->page_count);
+    bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
+           rpi_page->start_rpi);
+    hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
+    hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
+    if (!phba->sli4_hba.intr_enable)
+        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+    else
+        rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+    shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
+    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+    if (rc != MBX_TIMEOUT)
+        mempool_free(mboxq, phba->mbox_mem_pool);
+    if (shdr_status || shdr_add_status || rc) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                "2514 POST_RPI_HDR mailbox failed with "
+                "status x%x add_status x%x, mbx status x%x\n",
+                shdr_status, shdr_add_status, rc);
+        rc = -ENXIO;
+    }
+    return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * HBA consistent with the SLI-4 interface spec. This routine
+ * posts a PAGE_SIZE memory region to the port to hold up to
+ * PAGE_SIZE modulo 64 rpi context headers.
+ *
+ * Returns
+ *     A nonzero rpi defined as rpi_base <= rpi < max_rpi if sucessful
+ *     LPFC_RPI_ALLOC_ERROR if no rpis are available.
+ **/
+int
+lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
+{
+    int rpi;
+    uint16_t max_rpi, rpi_base, rpi_limit;
+    uint16_t rpi_remaining;
+    struct lpfc_rpi_hdr *rpi_hdr;
+
+    max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+    rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
+    rpi_limit = phba->sli4_hba.next_rpi;
+
+    /*
+     * The valid rpi range is not guaranteed to be zero-based. Start
+     * the search at the rpi_base as reported by the port.
+     */
+    spin_lock_irq(&phba->hbalock);
+    rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
+    if (rpi >= rpi_limit || rpi < rpi_base)
+        rpi = LPFC_RPI_ALLOC_ERROR;
+    else {
+        set_bit(rpi, phba->sli4_hba.rpi_bmask);
+        phba->sli4_hba.max_cfg_param.rpi_used++;
+        phba->sli4_hba.rpi_count++;
+    }
+
+    /*
+     * Don't try to allocate more rpi header regions if the device limit
+     * on available rpis max has been exhausted.
+     */
+    if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
+        (phba->sli4_hba.rpi_count >= max_rpi)) {
+        spin_unlock_irq(&phba->hbalock);
+        return rpi;
+    }
+
+    /*
+     * If the driver is running low on rpi resources, allocate another
+     * page now. Note that the next_rpi value is used because
+     * it represents how many are actually in use whereas max_rpi notes
+     * how many are supported max by the device.
+     */
+    rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
+        phba->sli4_hba.rpi_count;
+    spin_unlock_irq(&phba->hbalock);
+    if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
+        rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
+        if (!rpi_hdr) {
+            lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                    "2002 Error Could not grow rpi "
+                    "count\n");
+        } else {
+            lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
+        }
+    }
+
+    return rpi;
+}
+
+/**
+ * lpfc_sli4_free_rpi - Release an rpi for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release an rpi to the pool of
+ * available rpis maintained by the driver.
+ **/
+void
+lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
+{
+    spin_lock_irq(&phba->hbalock);
+    clear_bit(rpi, phba->sli4_hba.rpi_bmask);
+    phba->sli4_hba.rpi_count--;
+    phba->sli4_hba.max_cfg_param.rpi_used--;
+    spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to remove the memory region that
+ * provided rpi via a bitmask.
+ **/
+void
+lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
+{
+    kfree(phba->sli4_hba.rpi_bmask);
+}
+
+/**
+ * lpfc_sli4_resume_rpi - Remove the rpi bitmask region
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to remove the memory region that
+ * provided rpi via a bitmask.
+ **/
+int
+lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
+{
+    LPFC_MBOXQ_t *mboxq;
+    struct lpfc_hba *phba = ndlp->phba;
+    int rc;
+
+    /* The port is notified of the header region via a mailbox command. */
+    mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+    if (!mboxq)
+        return -ENOMEM;
+
+    /* Post all rpi memory regions to the port. */
+    lpfc_resume_rpi(mboxq, ndlp);
+    rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+    if (rc == MBX_NOT_FINISHED) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                "2010 Resume RPI Mailbox failed "
+                "status %d, mbxStatus x%x\n", rc,
+                bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+        mempool_free(mboxq, phba->mbox_mem_pool);
+        return -EIO;
+    }
+    return 0;
+}
+
+/**
+ * lpfc_sli4_init_vpi - Initialize a vpi with the port
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: vpi value to activate with the port.
+ *
+ * This routine is invoked to activate a vpi with the
+ * port when the host intends to use vports with a
+ * nonzero vpi.
+ *
+ * Returns:
+ *    0 success
+ *    -Evalue otherwise
+ **/
+int
+lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
+{
+    LPFC_MBOXQ_t *mboxq;
+    int rc = 0;
+    uint32_t mbox_tmo;
+
+    if (vpi == 0)
+        return -EINVAL;
+    mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+    if (!mboxq)
+        return -ENOMEM;
+    lpfc_init_vpi(mboxq, vpi);
+    mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
+    rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+    if (rc != MBX_TIMEOUT)
+        mempool_free(mboxq, phba->mbox_mem_pool);
+    if (rc != MBX_SUCCESS) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                "2022 INIT VPI Mailbox failed "
+                "status %d, mbxStatus x%x\n", rc,
+                bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+        rc = -EIO;
+    }
+    return rc;
+}
+
+/**
+ * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This routine is invoked to manually add a single FCF record. The caller
+ * must pass a completely initialized FCF_Record. This routine takes
+ * care of the nonembedded mailbox operations.
+ **/
+static void
+lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+    void *virt_addr;
+    union lpfc_sli4_cfg_shdr *shdr;
+    uint32_t shdr_status, shdr_add_status;
+
+    virt_addr = mboxq->sge_array->addr[0];
+    /* The IOCTL status is embedded in the mailbox subheader. */
+    shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
+    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+    if ((shdr_status || shdr_add_status) &&
+        (shdr_status != STATUS_FCF_IN_USE))
+        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                "2558 ADD_FCF_RECORD mailbox failed with "
+                "status x%x add_status x%x\n",
+                shdr_status, shdr_add_status);
+
+    lpfc_sli4_mbox_cmd_free(phba, mboxq);
+}
+
+/**
+ * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record: pointer to the initialized fcf record to add.
+ *
+ * This routine is invoked to manually add a single FCF record. The caller
+ * must pass a completely initialized FCF_Record. This routine takes
+ * care of the nonembedded mailbox operations.
+ **/
+int
+lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
+{
+    int rc = 0;
+    LPFC_MBOXQ_t *mboxq;
+    uint8_t *bytep;
+    void *virt_addr;
+    dma_addr_t phys_addr;
+    struct lpfc_mbx_sge sge;
+    uint32_t alloc_len, req_len;
+    uint32_t fcfindex;
+
+    mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+    if (!mboxq) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+            "2009 Failed to allocate mbox for ADD_FCF cmd\n");
+        return -ENOMEM;
+    }
+
+    req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
+          sizeof(uint32_t);
+
+    /* Allocate DMA memory and set up the non-embedded mailbox command */
+    alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+                     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
+                     req_len, LPFC_SLI4_MBX_NEMBED);
+    if (alloc_len < req_len) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+            "2523 Allocated DMA memory size (x%x) is "
+            "less than the requested DMA memory "
+            "size (x%x)\n", alloc_len, req_len);
+        lpfc_sli4_mbox_cmd_free(phba, mboxq);
+        return -ENOMEM;
+    }
+
+    /*
+     * Get the first SGE entry from the non-embedded DMA memory. This
+     * routine only uses a single SGE.
+     */
+    lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+    phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+    if (unlikely(!mboxq->sge_array)) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+                "2526 Failed to get the non-embedded SGE "
+                "virtual address\n");
+        lpfc_sli4_mbox_cmd_free(phba, mboxq);
+        return -ENOMEM;
+    }
+    virt_addr = mboxq->sge_array->addr[0];
+    /*
+     * Configure the FCF record for FCFI 0. This is the driver's
+     * hardcoded default and gets used in nonFIP mode.
+     */
+    fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
+    bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+    lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
+
+    /*
+     * Copy the fcf_index and the FCF Record Data. The data starts after
+     * the FCoE header plus word10. The data copy needs to be endian
+     * correct.
+     */
+    bytep += sizeof(uint32_t);
+    lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
+    mboxq->vport = phba->pport;
+    mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
+    rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+    if (rc == MBX_NOT_FINISHED) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+            "2515 ADD_FCF_RECORD mailbox failed with "
+            "status 0x%x\n", rc);
+        lpfc_sli4_mbox_cmd_free(phba, mboxq);
+        rc = -EIO;
+    } else
+        rc = 0;
+
+    return rc;
+}
+
+/**
+ * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record: pointer to the fcf record to write the default data.
+ * @fcf_index: FCF table entry index.
+ *
+ * This routine is invoked to build the driver's default FCF record. The
+ * values used are hardcoded. This routine handles memory initialization.
+ *
+ **/
+void
+lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
+                struct fcf_record *fcf_record,
+                uint16_t fcf_index)
+{
+    memset(fcf_record, 0, sizeof(struct fcf_record));
+    fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
+    fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
+    fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
+    bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
+    bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
+    bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
+    bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
+    bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
+    bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
+    bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
+    bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
+    bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
+    bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
+    bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
+    bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
+        LPFC_FCF_FPMA | LPFC_FCF_SPMA);
+    /* Set the VLAN bit map */
+    if (phba->valid_vlan) {
+        fcf_record->vlan_bitmap[phba->vlan_id / 8]
+            = 1 << (phba->vlan_id % 8);
+    }
+}
+
+/**
+ * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read up to @fcf_num of FCF record from the
+ * device starting with the given @fcf_index.
+ **/
+int
+lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+    int rc = 0, error;
+    LPFC_MBOXQ_t *mboxq;
+    void *virt_addr;
+    dma_addr_t phys_addr;
+    uint8_t *bytep;
+    struct lpfc_mbx_sge sge;
+    uint32_t alloc_len, req_len;
+    struct lpfc_mbx_read_fcf_tbl *read_fcf;
+
+    mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+    if (!mboxq) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                "2000 Failed to allocate mbox for "
+                "READ_FCF cmd\n");
+        return -ENOMEM;
+    }
+
+    req_len = sizeof(struct fcf_record) +
+          sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
+
+    /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
+    alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+             LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
+             LPFC_SLI4_MBX_NEMBED);
+
+    if (alloc_len < req_len) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                "0291 Allocated DMA memory size (x%x) is "
+                "less than the requested DMA memory "
+                "size (x%x)\n", alloc_len, req_len);
+        lpfc_sli4_mbox_cmd_free(phba, mboxq);
+        return -ENOMEM;
+    }
+
+    /* Get the first SGE entry from the non-embedded DMA memory. This
+     * routine only uses a single SGE.
+     */
+    lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+    phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+    if (unlikely(!mboxq->sge_array)) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+                "2527 Failed to get the non-embedded SGE "
+                "virtual address\n");
+        lpfc_sli4_mbox_cmd_free(phba, mboxq);
+        return -ENOMEM;
+    }
+    virt_addr = mboxq->sge_array->addr[0];
+    read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
+
+    /* Set up command fields */
+    bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
+    /* Perform necessary endian conversion */
+    bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+    lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
+    mboxq->vport = phba->pport;
+    mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
+    rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+    if (rc == MBX_NOT_FINISHED) {
+        lpfc_sli4_mbox_cmd_free(phba, mboxq);
+        error = -EIO;
+    } else
+        error = 0;
+    return error;
+}
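As a closing aside, the default FCF record built by lpfc_sli4_build_dflt_fcf_record() above encodes a single VLAN id as one bit in a byte-wide bitmap. The snippet below is a standalone userspace illustration of that arithmetic only; it is not driver code, and the buffer size and names here are assumptions made for the example.

/* Standalone illustration of the vlan_bitmap encoding used by
 * lpfc_sli4_build_dflt_fcf_record() in this patch: one bit per VLAN id,
 * eight ids per byte.  Not driver code. */
#include <stdio.h>
#include <string.h>

#define VLAN_BITMAP_SIZE 512   /* 4096 possible VLAN ids / 8 bits per byte */

int main(void)
{
    unsigned char vlan_bitmap[VLAN_BITMAP_SIZE];
    unsigned int vlan_id = 101;        /* example id; any value 0..4095 works */

    memset(vlan_bitmap, 0, sizeof(vlan_bitmap));
    vlan_bitmap[vlan_id / 8] = 1 << (vlan_id % 8);  /* same arithmetic as the patch */

    printf("VLAN %u -> byte %u, bit %u (0x%02x)\n",
           vlan_id, vlan_id / 8, vlan_id % 8, vlan_bitmap[vlan_id / 8]);
    return 0;
}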