blk-mq: run dispatch lock once in case of issuing from list

It isn't necessary to call blk_mq_run_dispatch_ops() once for each
request issued directly; it is enough to call it a single time when
issuing the whole list.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20211203131534.3668411-5-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Ming Lei 2021-12-03 21:15:34 +08:00 committed by Jens Axboe
parent bcc330f42f
commit 4cafe86c92
2 changed files with 8 additions and 9 deletions

View File

@@ -475,7 +475,8 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
		 * us one extra enqueue & dequeue to sw queue.
		 */
		if (!hctx->dispatch_busy && !run_queue_async) {
-			blk_mq_try_issue_list_directly(hctx, list);
+			blk_mq_run_dispatch_ops(hctx->queue,
+				blk_mq_try_issue_list_directly(hctx, list));
			if (list_empty(list))
				goto out;
		}

View File

@@ -2464,12 +2464,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 {
-	blk_status_t ret;
-	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
-
-	blk_mq_run_dispatch_ops(rq->q,
-		ret = __blk_mq_try_issue_directly(hctx, rq, true, last));
-	return ret;
+	return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
 }
 
 static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
@@ -2526,7 +2521,8 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	plug->rq_count = 0;
 
 	if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
-		blk_mq_plug_issue_direct(plug, false);
+		blk_mq_run_dispatch_ops(plug->mq_list->q,
+				blk_mq_plug_issue_direct(plug, false));
		if (rq_list_empty(plug->mq_list))
			return;
	}
@@ -2867,7 +2863,9 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
	 * bypass a potential scheduler on the bottom device for
	 * insert.
	 */
-	return blk_mq_request_issue_directly(rq, true);
+	blk_mq_run_dispatch_ops(rq->q,
+			ret = blk_mq_request_issue_directly(rq, true));
+	return ret;
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);