blk-mq: pass hctx to blk_mq_dispatch_rq_list
All requests in the 'list' of blk_mq_dispatch_rq_list belong to the same
hctx, so it is better to pass the hctx instead of the request queue,
because blk-mq's dispatch target is the hctx rather than the request
queue.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Tested-by: Baolin Wang <baolin.wang7@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Cc: Sagi Grimberg <sagi@grimberg.me>
Cc: Baolin Wang <baolin.wang7@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
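As a rough illustration of the idea behind the signature change (a standalone model, not kernel code; all names in it are made up for the sketch): the callee no longer derives the hw context from each request, the caller hands it over directly, and the callee only sanity-checks that every request on the list really maps to that hctx, which mirrors the WARN_ON_ONCE() added in the patch below.

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct hw_ctx { int index; };            /* stand-in for struct blk_mq_hw_ctx */
    struct req    { struct hw_ctx *hctx; };  /* stand-in for struct request       */

    /* New shape: take the hctx directly, since the whole list maps to it;
     * the old shape looked it up from each request while dispatching. */
    static bool dispatch_rq_list(struct hw_ctx *hctx, struct req **list, size_t n)
    {
            for (size_t i = 0; i < n; i++)
                    assert(list[i]->hctx == hctx);  /* analogous to WARN_ON_ONCE() */
            return n > 0;                           /* "did some work" */
    }

    int main(void)
    {
            struct hw_ctx h = { .index = 0 };
            struct req r0 = { .hctx = &h }, r1 = { .hctx = &h };
            struct req *list[] = { &r0, &r1 };

            return dispatch_rq_list(&h, list, 2) ? 0 : 1;
    }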
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -96,10 +96,9 @@ static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
 	struct elevator_queue *e = q->elevator;
 	LIST_HEAD(rq_list);
 	int ret = 0;
+	struct request *rq;
 
 	do {
-		struct request *rq;
-
 		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
 			break;
 
@@ -131,7 +130,7 @@ static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
 		 * in blk_mq_dispatch_rq_list().
 		 */
 		list_add(&rq->queuelist, &rq_list);
-	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
+	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, true));
 
 	return ret;
 }
@@ -161,10 +160,9 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
 	LIST_HEAD(rq_list);
 	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
 	int ret = 0;
+	struct request *rq;
 
 	do {
-		struct request *rq;
-
 		if (!list_empty_careful(&hctx->dispatch)) {
 			ret = -EAGAIN;
 			break;
@@ -200,7 +198,7 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
 		/* round robin for fair dispatch */
 		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
 
-	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
+	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, true));
 
 	WRITE_ONCE(hctx->dispatch_from, ctx);
 	return ret;
@@ -240,7 +238,7 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	 */
 	if (!list_empty(&rq_list)) {
 		blk_mq_sched_mark_restart_hctx(hctx);
-		if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
+		if (blk_mq_dispatch_rq_list(hctx, &rq_list, false)) {
 			if (has_sched_dispatch)
 				ret = blk_mq_do_dispatch_sched(hctx);
 			else
@@ -253,7 +251,7 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 			ret = blk_mq_do_dispatch_ctx(hctx);
 	} else {
 		blk_mq_flush_busy_ctxs(hctx, &rq_list);
-		blk_mq_dispatch_rq_list(q, &rq_list, false);
+		blk_mq_dispatch_rq_list(hctx, &rq_list, false);
 	}
 
 	return ret;
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1258,10 +1258,10 @@ static void blk_mq_handle_zone_resource(struct request *rq,
 /*
  * Returns true if we did some work AND can potentially do more.
  */
-bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
+bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
 			     bool got_budget)
 {
-	struct blk_mq_hw_ctx *hctx;
+	struct request_queue *q = hctx->queue;
 	struct request *rq, *nxt;
 	bool no_tag = false;
 	int errors, queued;
@@ -1283,7 +1283,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 
 		rq = list_first_entry(list, struct request, queuelist);
 
-		hctx = rq->mq_hctx;
+		WARN_ON_ONCE(hctx != rq->mq_hctx);
 		if (!got_budget && !blk_mq_get_dispatch_budget(q)) {
 			blk_mq_put_driver_tag(rq);
 			no_budget_avail = true;
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -40,7 +40,7 @@ struct blk_mq_ctx {
 void blk_mq_exit_queue(struct request_queue *q);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
-bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
+bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *, bool);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
 				bool kick_requeue_list);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);