lpfc: Refactor NVME LS receive handling
In preparation for supporting both initiator mode and target mode receiving NVME LSs, commonize the existing NVME LS request receive handling found in the base driver and in the nvmet side. Using the original lpfc_nvmet_unsol_ls_event() and lpfc_nvmet_unsol_ls_buffer() routines as templates, commonize the reception of an NVME LS request. The common routine validates the LS request and that it was received from a logged-in node, and allocates an lpfc_async_xchg_ctx that is used to manage the LS request. The role of the port is then inspected to determine which handler is to receive the LS: nvme or nvmet. As such, the nvmet handler is tied back in. A handler is created in nvme and is stubbed out.

Signed-off-by: Paul Ely <paul.ely@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 3a8070c567
parent 7b7f551b04
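For orientation, the flow this patch introduces can be condensed as below. This is an illustrative sketch only, not code from the patch: the helper name unsol_ls_dispatch_sketch is hypothetical, frame validation and error handling are omitted, and the real logic lives in lpfc_nvme_unsol_ls_handler in the lpfc_sli.c hunk further down.

/* Hypothetical, simplified sketch of the common LS receive path added by
 * this patch. The real routine (lpfc_nvme_unsol_ls_handler, below) also
 * validates R_CTL/F_CTL, checks that the sender is logged in, fills the
 * exchange context from the received frame, and aborts the exchange when
 * the LS cannot be handled.
 */
static void
unsol_ls_dispatch_sketch(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
        struct lpfc_async_xchg_ctx *axchg;
        int ret;

        axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
        if (!axchg)
                return;                 /* real code logs and aborts */

        /* ... populate axchg (oxid, sid, payload, ndlp, ...) from the frame ... */

        if (phba->nvmet_support)        /* port is acting as an NVME target */
                ret = lpfc_nvmet_handle_lsreq(phba, axchg);
        else                            /* port is acting as an NVME initiator */
                ret = lpfc_nvme_handle_lsreq(phba, axchg);

        if (ret) {                      /* LS not handled: drop it */
                kfree(axchg);
                /* real code also recycles the receive buffer and issues an ABTS */
        }
}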
@@ -563,8 +563,10 @@ void lpfc_nvme_update_localport(struct lpfc_vport *vport);
 int lpfc_nvmet_create_targetport(struct lpfc_hba *phba);
 int lpfc_nvmet_update_targetport(struct lpfc_hba *phba);
 void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba);
-void lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba,
-                        struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb);
+int lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
+                        struct lpfc_async_xchg_ctx *axchg);
+int lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
+                        struct lpfc_async_xchg_ctx *axchg);
 void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint32_t idx,
                         struct rqb_dmabuf *nvmebuf, uint64_t isr_ts,
                         uint8_t cqflag);
@@ -393,6 +393,25 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
        return;
 }
 
+/**
+ * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
+ * @phba: pointer to lpfc hba data structure.
+ * @axchg: pointer to exchange context for the NVME LS request
+ *
+ * This routine is used for processing an asynchronously received NVME LS
+ * request. Any remaining validation is done and the LS is then forwarded
+ * to the nvme-fc transport via nvme_fc_rcv_ls_req().
+ *
+ * Returns 0 if LS was handled and delivered to the transport
+ * Returns 1 if LS failed to be handled and should be dropped
+ */
+int
+lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
+                       struct lpfc_async_xchg_ctx *axchg)
+{
+        return 1;
+}
+
 static void
 lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                        struct lpfc_wcqe_complete *wcqe)
@@ -188,6 +188,7 @@ struct lpfc_async_xchg_ctx {
        } hdlrctx;
        struct list_head list;
        struct lpfc_hba *phba;
+       struct lpfc_nodelist *ndlp;
        struct nvmefc_ls_req *ls_req;
        struct nvmefc_ls_rsp ls_rsp;
        struct lpfc_iocbq *wqeq;
@@ -202,6 +203,7 @@ struct lpfc_async_xchg_ctx {
        uint16_t idx;
        uint16_t state;
        uint16_t flag;
+       void *payload;
        struct rqb_dmabuf *rqb_buffer;
        struct lpfc_nvmet_ctxbuf *ctxbuf;
        struct lpfc_sli4_hdw_queue *hdwq;
@@ -224,3 +226,6 @@ struct lpfc_async_xchg_ctx {
 /* routines found in lpfc_nvme.c */
 
 /* routines found in lpfc_nvmet.c */
+int lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
+                       struct lpfc_async_xchg_ctx *ctxp, uint32_t sid,
+                       uint16_t xri);
@@ -63,9 +63,6 @@ static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
                                             struct lpfc_async_xchg_ctx *,
                                             uint32_t, uint16_t);
-static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
-                                           struct lpfc_async_xchg_ctx *,
-                                           uint32_t, uint16_t);
 static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
                                     struct lpfc_async_xchg_ctx *);
 static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
@@ -865,7 +862,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
                                ctxp->oxid);
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                atomic_inc(&nvmep->xmt_ls_abort);
-               lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
+               lpfc_nvme_unsol_ls_issue_abort(phba, ctxp,
                                                ctxp->sid, ctxp->oxid);
                return -ENOMEM;
        }
@@ -908,7 +905,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
 
        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
        atomic_inc(&nvmep->xmt_ls_abort);
-       lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
+       lpfc_nvme_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
        return -ENXIO;
 }
 
@@ -1922,107 +1919,49 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
+ * lpfc_nvmet_handle_lsreq - Process an NVME LS request
  * @phba: pointer to lpfc hba data structure.
- * @pring: pointer to a SLI ring.
- * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
+ * @axchg: pointer to exchange context for the NVME LS request
  *
- * This routine is used for processing the WQE associated with a unsolicited
- * event. It first determines whether there is an existing ndlp that matches
- * the DID from the unsolicited WQE. If not, it will create a new one with
- * the DID from the unsolicited WQE. The ELS command from the unsolicited
- * WQE is then used to invoke the proper routine and to set up proper state
- * of the discovery state machine.
- **/
-static void
-lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
-                           struct hbq_dmabuf *nvmebuf)
+ * This routine is used for processing an asynchronously received NVME LS
+ * request. Any remaining validation is done and the LS is then forwarded
+ * to the nvmet-fc transport via nvmet_fc_rcv_ls_req().
+ *
+ * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing)
+ * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done.
+ * lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
+ *
+ * Returns 0 if LS was handled and delivered to the transport
+ * Returns 1 if LS failed to be handled and should be dropped
+ */
+int
+lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
+                        struct lpfc_async_xchg_ctx *axchg)
 {
 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
-       struct lpfc_nvmet_tgtport *tgtp;
-       struct fc_frame_header *fc_hdr;
-       struct lpfc_async_xchg_ctx *ctxp;
-       uint32_t *payload;
-       uint32_t size, oxid, sid, rc;
-
-       if (!nvmebuf || !phba->targetport) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-                               "6154 LS Drop IO\n");
-               oxid = 0;
-               size = 0;
-               sid = 0;
-               ctxp = NULL;
-               goto dropit;
-       }
-
-       fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
-       oxid = be16_to_cpu(fc_hdr->fh_ox_id);
-
-       tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
-       payload = (uint32_t *)(nvmebuf->dbuf.virt);
-       size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
-       sid = sli4_sid_from_fc_hdr(fc_hdr);
-
-       ctxp = kzalloc(sizeof(struct lpfc_async_xchg_ctx), GFP_ATOMIC);
-       if (ctxp == NULL) {
-               atomic_inc(&tgtp->rcv_ls_req_drop);
-               lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-                               "6155 LS Drop IO x%x: Alloc\n",
-                               oxid);
-dropit:
-               lpfc_nvmeio_data(phba, "NVMET LS DROP: "
-                                "xri x%x sz %d from %06x\n",
-                                oxid, size, sid);
-               lpfc_in_buf_free(phba, &nvmebuf->dbuf);
-               return;
-       }
-       ctxp->phba = phba;
-       ctxp->size = size;
-       ctxp->oxid = oxid;
-       ctxp->sid = sid;
-       ctxp->wqeq = NULL;
-       ctxp->state = LPFC_NVME_STE_LS_RCV;
-       ctxp->entry_cnt = 1;
-       ctxp->rqb_buffer = (void *)nvmebuf;
-       ctxp->hdwq = &phba->sli4_hba.hdwq[0];
-
-       lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
-                        oxid, size, sid);
-       /*
-        * The calling sequence should be:
-        * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
-        * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
-        */
+       struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
+       uint32_t *payload = axchg->payload;
+       int rc;
+
        atomic_inc(&tgtp->rcv_ls_req_in);
-       rc = nvmet_fc_rcv_ls_req(phba->targetport, NULL, &ctxp->ls_rsp,
-                                payload, size);
+
+       rc = nvmet_fc_rcv_ls_req(phba->targetport, NULL, &axchg->ls_rsp,
+                                axchg->payload, axchg->size);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
-                       "%08x %08x %08x\n", size, rc,
+                       "%08x %08x %08x\n", axchg->size, rc,
                        *payload, *(payload+1), *(payload+2),
                        *(payload+3), *(payload+4), *(payload+5));
 
-       if (rc == 0) {
+       if (!rc) {
                atomic_inc(&tgtp->rcv_ls_req_out);
-               return;
+               return 0;
        }
 
-       lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
-                        oxid, size, sid);
-
        atomic_inc(&tgtp->rcv_ls_req_drop);
-       lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-                       "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
-                       ctxp->oxid, rc);
-
-       /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
-       lpfc_in_buf_free(phba, &nvmebuf->dbuf);
-
-       atomic_inc(&tgtp->xmt_ls_abort);
-       lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
 #endif
+       return 1;
 }
 
 static void
@@ -2364,40 +2303,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
        }
 }
 
-/**
- * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
- * @phba: pointer to lpfc hba data structure.
- * @pring: pointer to a SLI ring.
- * @nvmebuf: pointer to received nvme data structure.
- *
- * This routine is used to process an unsolicited event received from a SLI
- * (Service Level Interface) ring. The actual processing of the data buffer
- * associated with the unsolicited event is done by invoking the routine
- * lpfc_nvmet_unsol_ls_buffer() after properly set up the buffer from the
- * SLI RQ on which the unsolicited event was received.
- **/
-void
-lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
-                          struct lpfc_iocbq *piocb)
-{
-       struct lpfc_dmabuf *d_buf;
-       struct hbq_dmabuf *nvmebuf;
-
-       d_buf = piocb->context2;
-       nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
-
-       if (!nvmebuf) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-                               "3015 LS Drop IO\n");
-               return;
-       }
-       if (phba->nvmet_support == 0) {
-               lpfc_in_buf_free(phba, &nvmebuf->dbuf);
-               return;
-       }
-       lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
-}
-
 /**
  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
  * @phba: pointer to lpfc hba data structure.
@@ -3402,8 +3307,16 @@ aerr:
        return 1;
 }
 
-static int
-lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
+/**
+ * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
+ *        via async frame receive where the frame is not handled.
+ * @phba: pointer to adapter structure
+ * @ctxp: pointer to the asynchronously received sequence
+ * @sid: address of the remote port to send the ABTS to
+ * @xri: oxid value for the ABTS (other side's exchange id).
+ **/
+int
+lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
                                struct lpfc_async_xchg_ctx *ctxp,
                                uint32_t sid, uint16_t xri)
 {
@@ -2792,6 +2792,121 @@ lpfc_sli_get_buff(struct lpfc_hba *phba,
        return &hbq_entry->dbuf;
 }
 
+/**
+ * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
+ *                              containing a NVME LS request.
+ * @phba: pointer to lpfc hba data structure.
+ * @piocb: pointer to the iocbq struct representing the sequence starting
+ *        frame.
+ *
+ * This routine initially validates the NVME LS, validates there is a login
+ * with the port that sent the LS, and then calls the appropriate nvme host
+ * or target LS request handler.
+ **/
+static void
+lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
+{
+       struct lpfc_nodelist *ndlp;
+       struct lpfc_dmabuf *d_buf;
+       struct hbq_dmabuf *nvmebuf;
+       struct fc_frame_header *fc_hdr;
+       struct lpfc_async_xchg_ctx *axchg = NULL;
+       char *failwhy = NULL;
+       uint32_t oxid, sid, did, fctl, size;
+       int ret;
+
+       d_buf = piocb->context2;
+
+       nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+       fc_hdr = nvmebuf->hbuf.virt;
+       oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+       sid = sli4_sid_from_fc_hdr(fc_hdr);
+       did = sli4_did_from_fc_hdr(fc_hdr);
+       fctl = (fc_hdr->fh_f_ctl[0] << 16 |
+               fc_hdr->fh_f_ctl[1] << 8 |
+               fc_hdr->fh_f_ctl[2]);
+       size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
+
+       lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
+                        oxid, size, sid);
+
+       if (phba->pport->load_flag & FC_UNLOADING) {
+               failwhy = "Driver Unloading";
+       } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
+               failwhy = "NVME FC4 Disabled";
+       } else if (!phba->nvmet_support && !phba->pport->localport) {
+               failwhy = "No Localport";
+       } else if (phba->nvmet_support && !phba->targetport) {
+               failwhy = "No Targetport";
+       } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
+               failwhy = "Bad NVME LS R_CTL";
+       } else if (unlikely((fctl & 0x00FF0000) !=
+                       (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
+               failwhy = "Bad NVME LS F_CTL";
+       } else {
+               axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
+               if (!axchg)
+                       failwhy = "No CTX memory";
+       }
+
+       if (unlikely(failwhy)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR,
+                               "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
+                               sid, oxid, failwhy);
+               goto out_fail;
+       }
+
+       /* validate the source of the LS is logged in */
+       ndlp = lpfc_findnode_did(phba->pport, sid);
+       if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+           ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+            (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+                               "6216 NVME Unsol rcv: No ndlp: "
+                               "NPort_ID x%x oxid x%x\n",
+                               sid, oxid);
+               goto out_fail;
+       }
+
+       axchg->phba = phba;
+       axchg->ndlp = ndlp;
+       axchg->size = size;
+       axchg->oxid = oxid;
+       axchg->sid = sid;
+       axchg->wqeq = NULL;
+       axchg->state = LPFC_NVME_STE_LS_RCV;
+       axchg->entry_cnt = 1;
+       axchg->rqb_buffer = (void *)nvmebuf;
+       axchg->hdwq = &phba->sli4_hba.hdwq[0];
+       axchg->payload = nvmebuf->dbuf.virt;
+       INIT_LIST_HEAD(&axchg->list);
+
+       if (phba->nvmet_support)
+               ret = lpfc_nvmet_handle_lsreq(phba, axchg);
+       else
+               ret = lpfc_nvme_handle_lsreq(phba, axchg);
+
+       /* if zero, LS was successfully handled. If non-zero, LS not handled */
+       if (!ret)
+               return;
+
+       lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR,
+                       "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
+                       "NVMe%s handler failed %d\n",
+                       did, sid, oxid,
+                       (phba->nvmet_support) ? "T" : "I", ret);
+
+out_fail:
+       kfree(axchg);
+
+       /* recycle receive buffer */
+       lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+
+       /* If start of new exchange, abort it */
+       if (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX))
+               lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
+}
+
 /**
  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
  * @phba: Pointer to HBA context object.
@@ -2813,7 +2928,7 @@ lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
        switch (fch_type) {
        case FC_TYPE_NVME:
-               lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
+               lpfc_nvme_unsol_ls_handler(phba, saveq);
                return 1;
        default:
                break;
@@ -13978,8 +14093,8 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 
        /* Just some basic sanity checks on FCP Command frame */
        fctl = (fc_hdr->fh_f_ctl[0] << 16 |
-               fc_hdr->fh_f_ctl[1] << 8 |
-               fc_hdr->fh_f_ctl[2]);
+       fc_hdr->fh_f_ctl[1] << 8 |
+       fc_hdr->fh_f_ctl[2]);
        if (((fctl &
            (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
            (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||