ublk: simplify aborting request

Now ublk_abort_queue() runs exclusively with ublk_queue_rq() and the
ubq_daemon task, so aborting a request can be simplified:

- set UBLK_IO_FLAG_ABORTED in ublk_abort_queue(), now solely to mark
  this request as aborted

- abort the request in ublk_queue_rq() if ubq->canceling is set (both
  paths are condensed in the sketch before the diff)

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20231009093324.957829-8-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Authored by Ming Lei on 2023-10-09 17:33:22 +08:00; committed by Jens Axboe
commit b4e1353f46 (parent 216c8f5ef0)
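
Condensed, the two abort paths after this patch fit together as sketched
below. This is assembled from the hunks that follow, not a verbatim copy
of drivers/block/ublk_drv.c:

        /* ublk_queue_rq(): start the request first, then abort it inline
         * when the queue is being canceled */
        blk_mq_start_request(bd->rq);
        if (unlikely(ubq->canceling)) {
                __ublk_abort_rq(ubq, rq);
                return BLK_STS_OK;
        }

        /* ublk_abort_queue(): fail only started requests, marking each
         * one aborted right before failing it */
        if (rq && blk_mq_request_started(rq)) {
                io->flags |= UBLK_IO_FLAG_ABORTED;
                __ublk_fail_req(ubq, io, rq);
        }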


@@ -1084,13 +1084,10 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
 {
         WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
 
-        if (!(io->flags & UBLK_IO_FLAG_ABORTED)) {
-                io->flags |= UBLK_IO_FLAG_ABORTED;
-                if (ublk_queue_can_use_recovery_reissue(ubq))
-                        blk_mq_requeue_request(req, false);
-                else
-                        ublk_put_req_ref(ubq, req);
-        }
+        if (ublk_queue_can_use_recovery_reissue(ubq))
+                blk_mq_requeue_request(req, false);
+        else
+                ublk_put_req_ref(ubq, req);
 }
 
 static void ubq_complete_io_cmd(struct ublk_io *io, int res,
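
With the UBLK_IO_FLAG_ABORTED test-and-set hoisted out of __ublk_fail_req()
(the flag is now set by its only caller, ublk_abort_queue(), in a later
hunk), the function reduces to the requeue-or-complete decision. For
orientation, the two recovery helpers it consults boil down to feature-flag
tests along these lines; the bodies here are a sketch inferred from the
UBLK_F_USER_RECOVERY flags, not copied from the tree:

        static inline bool ublk_queue_can_use_recovery(struct ublk_queue *ubq)
        {
                /* daemon may be restarted; keep requests around for reissue */
                return ubq->flags & UBLK_F_USER_RECOVERY;
        }

        static inline bool ublk_queue_can_use_recovery_reissue(struct ublk_queue *ubq)
        {
                /* aborted requests are requeued instead of failed */
                return ublk_queue_can_use_recovery(ubq) &&
                        (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
        }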
@@ -1231,27 +1228,10 @@ static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
 {
         struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
-        struct ublk_io *io;
 
-        if (!llist_add(&data->node, &ubq->io_cmds))
-                return;
-
-        io = &ubq->ios[rq->tag];
-        /*
-         * If the check pass, we know that this is a re-issued request aborted
-         * previously in cancel fn because the ubq_daemon(cmd's task) is
-         * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
-         * because this ioucmd's io_uring context may be freed now if no inflight
-         * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work.
-         *
-         * Note: cancel fn sets UBLK_IO_FLAG_ABORTED and ends this request(releasing
-         * the tag). Then the request is re-started(allocating the tag) and we are here.
-         * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
-         * guarantees that here is a re-issued request aborted previously.
-         */
-        if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
-                ublk_abort_io_cmds(ubq);
-        } else {
-                io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
-        }
+        if (llist_add(&data->node, &ubq->io_cmds)) {
+                struct ublk_io *io = &ubq->ios[rq->tag];
+
+                io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
+        }
 }
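
The restructured ublk_queue_cmd() leans on llist_add()'s contract: it
returns true only when the list was empty before the add, so exactly one
task-work callback is scheduled per batch and ublk_rq_task_work_cb() drains
everything queued in the meantime. A minimal userspace illustration of that
gating (the single-threaded push() below is a stand-in for the kernel's
lock-free llist, not the real API):

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdio.h>

        struct node {
                struct node *next;
        };

        /* Stand-in for llist_add(): returns true when the list was empty. */
        static bool push(struct node **head, struct node *n)
        {
                bool was_empty = (*head == NULL);

                n->next = *head;
                *head = n;
                return was_empty;
        }

        int main(void)
        {
                struct node *io_cmds = NULL;
                struct node reqs[3];

                for (int i = 0; i < 3; i++) {
                        /* only the request that finds the list empty
                         * schedules the task work; the rest ride along */
                        if (push(&io_cmds, &reqs[i]))
                                printf("req %d: schedule task work\n", i);
                        else
                                printf("req %d: batched\n", i);
                }
                return 0;
        }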
@@ -1321,13 +1301,12 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
         if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
                 return BLK_STS_IOERR;
 
+        blk_mq_start_request(bd->rq);
         if (unlikely(ubq->canceling)) {
                 __ublk_abort_rq(ubq, rq);
                 return BLK_STS_OK;
         }
 
-        blk_mq_start_request(bd->rq);
-
         ublk_queue_cmd(ubq, rq);
 
         return BLK_STS_OK;
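
Starting the request ahead of the ubq->canceling test means a request that
reaches __ublk_abort_rq() is always started, and it pairs with the
blk_mq_request_started() check that the next hunk adds to
ublk_abort_queue(): since the two paths run exclusively of each other, a
request is either aborted inline here or seen as started there. For
reference, __ublk_abort_rq() is expected to behave roughly as below; this
body is reconstructed from context, an assumption rather than a quote:

        static inline void __ublk_abort_rq(struct ublk_queue *ubq,
                        struct request *rq)
        {
                /* assumed body: requeue for recovery, otherwise fail */
                if (ublk_queue_can_use_recovery(ubq))
                        blk_mq_requeue_request(rq, false);
                else
                        blk_mq_end_request(rq, BLK_STS_IOERR);
        }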
@@ -1450,8 +1429,10 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
                          * will do it
                          */
                         rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
-                        if (rq)
+                        if (rq && blk_mq_request_started(rq)) {
+                                io->flags |= UBLK_IO_FLAG_ABORTED;
                                 __ublk_fail_req(ubq, io, rq);
+                        }
                 }
         }
 }
@@ -1535,7 +1516,7 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
 
                 io = &ubq->ios[pdu->tag];
                 WARN_ON_ONCE(io->cmd != cmd);
-                ublk_cancel_cmd(ubq, &ubq->ios[pdu->tag], issue_flags);
+                ublk_cancel_cmd(ubq, io, issue_flags);
 
                 if (need_schedule) {
                         if (ublk_can_use_recovery(ub))