blk-mq: pass request queue to blk_mq_run_dispatch_ops
We have switched to allocating the SRCU structure inside the request
queue, so it is fine to pass the request queue to
blk_mq_run_dispatch_ops().

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20211203131534.3668411-4-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit bcc330f42f
parent 704b914f15
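Since the SRCU structure now lives in struct request_queue, any caller
holding a queue pointer (rq->q or hctx->queue) can open a dispatch
section; an hctx is no longer required. A minimal sketch of the new
calling convention (hypothetical function name; it mirrors
blk_mq_request_issue_directly() in the hunks below):

/*
 * Hypothetical illustration, not part of the patch: the dispatch
 * section is entered via the request queue that owns the request,
 * while the hctx is only needed for the actual issue.
 */
static blk_status_t issue_rq_sketch(struct request *rq, bool last)
{
	blk_status_t ret;

	blk_mq_run_dispatch_ops(rq->q,
		ret = __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last));
	return ret;
}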
block/blk-mq.c:

@@ -1925,7 +1925,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 	 */
 	WARN_ON_ONCE(in_interrupt());
 
-	blk_mq_run_dispatch_ops(hctx, blk_mq_sched_dispatch_requests(hctx));
+	blk_mq_run_dispatch_ops(hctx->queue,
+			blk_mq_sched_dispatch_requests(hctx));
 }
 
 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
@@ -2047,7 +2048,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
	 * quiesced.
	 */
-	blk_mq_run_dispatch_ops(hctx,
+	blk_mq_run_dispatch_ops(hctx->queue,
		need_run = !blk_queue_quiesced(hctx->queue) &&
		blk_mq_hctx_has_pending(hctx));
 
@@ -2466,7 +2467,7 @@ static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 	blk_status_t ret;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
-	blk_mq_run_dispatch_ops(hctx,
+	blk_mq_run_dispatch_ops(rq->q,
		ret = __blk_mq_try_issue_directly(hctx, rq, true, last));
	return ret;
 }
@@ -2780,7 +2781,7 @@ void blk_mq_submit_bio(struct bio *bio)
 			(q->nr_hw_queues == 1 || !is_sync)))
		blk_mq_sched_insert_request(rq, false, true, true);
	else
-		blk_mq_run_dispatch_ops(rq->mq_hctx,
+		blk_mq_run_dispatch_ops(rq->q,
				blk_mq_try_issue_directly(rq->mq_hctx, rq));
 }
 
block/blk-mq.h:

@@ -375,9 +375,9 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 }
 
 /* run the code block in @dispatch_ops with rcu/srcu read lock held */
-#define blk_mq_run_dispatch_ops(hctx, dispatch_ops)		\
+#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
 do {								\
-	if (!((hctx)->flags & BLK_MQ_F_BLOCKING)) {		\
+	if (!blk_queue_has_srcu(q)) {				\
		rcu_read_lock();				\
		(dispatch_ops);					\
		rcu_read_unlock();				\
@@ -385,9 +385,9 @@ do { \
 	int srcu_idx;						\
								\
	might_sleep();						\
-	srcu_idx = srcu_read_lock((hctx)->queue->srcu);		\
+	srcu_idx = srcu_read_lock((q)->srcu);			\
	(dispatch_ops);						\
-	srcu_read_unlock((hctx)->queue->srcu, srcu_idx);	\
+	srcu_read_unlock((q)->srcu, srcu_idx);			\
	} \
 } while (0)
 
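Pieced together, the macro after this patch reads roughly as follows.
The "} else {" joining the two hunks is assumed, since it falls between
the changed regions; the comments are added here for explanation:

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
do {								\
	if (!blk_queue_has_srcu(q)) {				\
		/* Plain RCU read section; dispatch must not sleep. */ \
		rcu_read_lock();				\
		(dispatch_ops);					\
		rcu_read_unlock();				\
	} else {						\
		int srcu_idx;					\
								\
		/* SRCU path: queue has ->srcu, dispatch may sleep. */ \
		might_sleep();					\
		srcu_idx = srcu_read_lock((q)->srcu);		\
		(dispatch_ops);					\
		srcu_read_unlock((q)->srcu, srcu_idx);		\
	}							\
} while (0)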