nvme-fc: eliminate terminate_io use by nvme_fc_error_recovery
nvme_fc_error_recovery() special-cases the CONNECTING state and calls __nvme_fc_terminate_io(). __nvme_fc_terminate_io() itself special-cases the CONNECTING state and calls the routine that aborts outstanding ios.

Simplify the sequence by calling the routine that aborts outstanding ios directly from nvme_fc_error_recovery().

Move __nvme_fc_abort_outstanding_ios(), and nvme_fc_terminate_exchange() which is called by it, earlier in the file to avoid adding function prototypes for nvme_fc_error_recovery().

Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
commit 95ced8a2c7
parent 9c2bb2577d
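At a glance, the patch collapses a two-level CONNECTING special case into one. A minimal sketch of the resulting control flow, paraphrased from the hunks below rather than quoted verbatim from the kernel source:

static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
		/*
		 * Previously this path called __nvme_fc_terminate_io(),
		 * which re-checked for CONNECTING before aborting. Now
		 * the abort runs directly; start_queues=true restarts
		 * the queues so the stalled connect thread observes the
		 * failure.
		 */
		__nvme_fc_abort_outstanding_ios(ctrl, true);
		set_bit(ASSOC_FAILED, &ctrl->flags);
		return;
	}

	/* all other states: proceed with controller reset (unchanged) */
}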
@@ -2413,27 +2413,97 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
 	nvme_fc_ctrl_put(ctrl);
 }
 
-static void __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl);
+/*
+ * This routine is used by the transport when it needs to find active
+ * io on a queue that is to be terminated. The transport uses
+ * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
+ * this routine to kill them on a 1 by 1 basis.
+ *
+ * As FC allocates an FC exchange for each io, the transport must contact
+ * the LLDD to terminate the exchange, thus releasing the FC exchange.
+ * After terminating the exchange the LLDD will call the transport's
+ * normal io done path for the request, but it will have an aborted
+ * status. The done path will return the io request back to the block
+ * layer with an error status.
+ */
+static bool
+nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+{
+	struct nvme_ctrl *nctrl = data;
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
+
+	__nvme_fc_abort_op(ctrl, op);
+	return true;
+}
+
+/*
+ * This routine runs through all outstanding commands on the association
+ * and aborts them. It is typically called by the delete_association
+ * routine. It is also called due to an error during reconnect. In that
+ * scenario, it is most likely a command that initializes the controller,
+ * including fabric Connect commands on io queues, that may have timed
+ * out or failed; the io must be killed for the connect thread to see
+ * the error.
+ */
+static void
+__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
+{
+	/*
+	 * If io queues are present, stop them and terminate all outstanding
+	 * ios on them. As FC allocates an FC exchange for each io, the
+	 * transport must contact the LLDD to terminate the exchange,
+	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
+	 * to tell us which ios are busy and invoke a transport routine
+	 * to kill them with the LLDD. After terminating the exchange
+	 * the LLDD will call the transport's normal io done path, but it
+	 * will have an aborted status. The done path will return the
+	 * io requests back to the block layer as part of normal completions
+	 * (but with error status).
+	 */
+	if (ctrl->ctrl.queue_count > 1) {
+		nvme_stop_queues(&ctrl->ctrl);
+		blk_mq_tagset_busy_iter(&ctrl->tag_set,
+				nvme_fc_terminate_exchange, &ctrl->ctrl);
+		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
+		if (start_queues)
+			nvme_start_queues(&ctrl->ctrl);
+	}
+
+	/*
+	 * Other transports, which don't have link-level contexts bound
+	 * to sqe's, would try to gracefully shutdown the controller by
+	 * writing the registers for shutdown and polling (call
+	 * nvme_shutdown_ctrl()). Given a bunch of io was potentially
+	 * just aborted and we will wait on those contexts, and given
+	 * there was no indication of how live the controller is on the
+	 * link, don't send more io to create more contexts for the
+	 * shutdown. Let the controller fail via keepalive failure if
+	 * it's still present.
+	 */
+
+	/*
+	 * Clean up the admin queue. Same thing as above.
+	 */
+	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+				nvme_fc_terminate_exchange, &ctrl->ctrl);
+	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+}
+
 static void
 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
 {
 	/*
-	 * if an error (io timeout, etc) while (re)connecting,
-	 * it's an error on creating the new association.
-	 * Start the error recovery thread if it hasn't already
-	 * been started. It is expected there could be multiple
-	 * ios hitting this path before things are cleaned up.
+	 * If an error (io timeout, etc) occurred while (re)connecting,
+	 * the remote port requested termination of the association
+	 * (disconnect_ls), or an error (timeout or abort) occurred on an
+	 * io while creating the controller, abort any ios on the
+	 * association and let the create_association error path resolve
+	 * things.
 	 */
 	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
-		__nvme_fc_terminate_io(ctrl);
-
-		/*
-		 * Rescheduling the connection after recovering
-		 * from the io error is left to the reconnect work
-		 * item, which is what should have stalled waiting on
-		 * the io that had the error that scheduled this work.
-		 */
+		__nvme_fc_abort_outstanding_ios(ctrl, true);
+		set_bit(ASSOC_FAILED, &ctrl->flags);
 		return;
 	}
 
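For reference, the block-layer iterator contract the new helper relies on, as it stood in this kernel generation (the reserved argument was dropped from busy_tag_iter_fn in later kernels):

/* Called once for each busy request in the tag set; returning true
 * continues the walk, returning false stops it. */
typedef bool (busy_tag_iter_fn)(struct request *rq, void *priv,
				bool reserved);

void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
			     busy_tag_iter_fn *fn, void *priv);

/* Blocks until every started request in the tag set has completed. */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);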
@@ -2747,30 +2817,6 @@ nvme_fc_complete_rq(struct request *rq)
 	nvme_fc_ctrl_put(ctrl);
 }
 
-/*
- * This routine is used by the transport when it needs to find active
- * io on a queue that is to be terminated. The transport uses
- * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
- * this routine to kill them on a 1 by 1 basis.
- *
- * As FC allocates an FC exchange for each io, the transport must contact
- * the LLDD to terminate the exchange, thus releasing the FC exchange.
- * After terminating the exchange the LLDD will call the transport's
- * normal io done path for the request, but it will have an aborted
- * status. The done path will return the io request back to the block
- * layer with an error status.
- */
-static bool
-nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
-{
-	struct nvme_ctrl *nctrl = data;
-	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
-	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
-
-	__nvme_fc_abort_op(ctrl, op);
-	return true;
-}
-
 
 static const struct blk_mq_ops nvme_fc_mq_ops = {
 	.queue_rq	= nvme_fc_queue_rq,
@@ -3111,60 +3157,6 @@ out_free_queue:
 }
 
-
-/*
- * This routine runs through all outstanding commands on the association
- * and aborts them. It is typically called by the delete_association
- * routine. It is also called due to an error during reconnect. In that
- * scenario, it is most likely a command that initializes the controller,
- * including fabric Connect commands on io queues, that may have timed
- * out or failed; the io must be killed for the connect thread to see
- * the error.
- */
-static void
-__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
-{
-	/*
-	 * If io queues are present, stop them and terminate all outstanding
-	 * ios on them. As FC allocates an FC exchange for each io, the
-	 * transport must contact the LLDD to terminate the exchange,
-	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
-	 * to tell us which ios are busy and invoke a transport routine
-	 * to kill them with the LLDD. After terminating the exchange
-	 * the LLDD will call the transport's normal io done path, but it
-	 * will have an aborted status. The done path will return the
-	 * io requests back to the block layer as part of normal completions
-	 * (but with error status).
-	 */
-	if (ctrl->ctrl.queue_count > 1) {
-		nvme_stop_queues(&ctrl->ctrl);
-		blk_mq_tagset_busy_iter(&ctrl->tag_set,
-				nvme_fc_terminate_exchange, &ctrl->ctrl);
-		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
-		if (start_queues)
-			nvme_start_queues(&ctrl->ctrl);
-	}
-
-	/*
-	 * Other transports, which don't have link-level contexts bound
-	 * to sqe's, would try to gracefully shutdown the controller by
-	 * writing the registers for shutdown and polling (call
-	 * nvme_shutdown_ctrl()). Given a bunch of io was potentially
-	 * just aborted and we will wait on those contexts, and given
-	 * there was no indication of how live the controller is on the
-	 * link, don't send more io to create more contexts for the
-	 * shutdown. Let the controller fail via keepalive failure if
-	 * it's still present.
-	 */
-
-	/*
-	 * Clean up the admin queue. Same thing as above.
-	 */
-	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
-	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
-				nvme_fc_terminate_exchange, &ctrl->ctrl);
-	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
-}
 
 /*
  * This routine stops operation of the controller on the host side.
  * On the host os stack side: Admin and IO queues are stopped,
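The comment above names the helper's two consumers. A sketch of the intended call patterns; the start_queues values are inferred from the surrounding comments, not taken from this diff, and the demo_* wrappers are hypothetical:

/* Hypothetical illustration, not kernel source. */
static void demo_delete_association_path(struct nvme_fc_ctrl *ctrl)
{
	/* the association is being torn down for good: abort everything
	 * and leave the queues stopped */
	__nvme_fc_abort_outstanding_ios(ctrl, false);
}

static void demo_connect_error_path(struct nvme_fc_ctrl *ctrl)
{
	/* error while (re)connecting: abort everything, then restart the
	 * queues so the connect thread's ios complete (with error) and
	 * the create_association path can resolve things */
	__nvme_fc_abort_outstanding_ios(ctrl, true);
}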
@@ -3297,17 +3289,6 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 static void
 __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
 {
-	/*
-	 * if state is CONNECTING - the error occurred as part of a
-	 * reconnect attempt. Abort any ios on the association and
-	 * let the create_association error paths resolve things.
-	 */
-	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
-		__nvme_fc_abort_outstanding_ios(ctrl, true);
-		set_bit(ASSOC_FAILED, &ctrl->flags);
-		return;
-	}
-
 	/*
 	 * For any other state, kill the association. As this routine
 	 * is a common io abort routine for resetting and such, after