diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 4bc701b32ce2..0861f15ec0df 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -336,6 +336,18 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
+	/*
+	 * __blk_mq_update_nr_hw_queues will update nr_hw_queues and
+	 * queue_hw_ctx after freezing the queue, so we can use q_usage_counter
+	 * to avoid racing with it. __blk_mq_update_nr_hw_queues will use
+	 * synchronize_rcu to ensure all users have left the critical section
+	 * below and seen the zeroed q_usage_counter.
+	 */
+	rcu_read_lock();
+	if (percpu_ref_is_zero(&q->q_usage_counter)) {
+		rcu_read_unlock();
+		return;
+	}
 	queue_for_each_hw_ctx(q, hctx, i) {
 		struct blk_mq_tags *tags = hctx->tags;
 
@@ -351,7 +363,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
 		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
 	}
-
+	rcu_read_unlock();
 }
 
 static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 24fc09cf7f17..58be2eaa5aaa 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2346,6 +2346,10 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
+	/*
+	 * Sync with blk_mq_queue_tag_busy_iter.
+	 */
+	synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
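For reference, here is a minimal userspace sketch of the synchronization
pattern this patch relies on, built on liburcu rather than the kernel's RCU
implementation; the `frozen` flag and `iterate_tags()` helper are hypothetical
stand-ins for a zeroed q_usage_counter and the tag walk in
blk_mq_queue_tag_busy_iter(). A reader that observes the frozen state bails
out inside its read-side critical section, and the updater's
synchronize_rcu() guarantees every reader that missed the flag has already
left the critical section before the update proceeds.

/*
 * Sketch only, assuming liburcu (https://liburcu.org) is installed.
 * Build: gcc sketch.c -lurcu -lpthread
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <urcu.h>			/* default liburcu flavor */

static atomic_int frozen;		/* stand-in for a zeroed q_usage_counter */

static void iterate_tags(void)
{
	/* stand-in for walking hctx/tags in blk_mq_queue_tag_busy_iter() */
	printf("iterating tags under rcu_read_lock\n");
}

static void *reader(void *arg)
{
	rcu_register_thread();

	/* shape of blk_mq_queue_tag_busy_iter() after the patch */
	rcu_read_lock();
	if (atomic_load(&frozen)) {	/* the percpu_ref_is_zero() check */
		rcu_read_unlock();
		rcu_unregister_thread();
		return NULL;
	}
	iterate_tags();
	rcu_read_unlock();

	rcu_unregister_thread();
	return NULL;
}

int main(void)
{
	pthread_t t;

	rcu_register_thread();
	pthread_create(&t, NULL, reader, NULL);

	/* shape of the updater: "freeze", then wait out existing readers */
	atomic_store(&frozen, 1);	/* new readers now bail out early */
	synchronize_rcu();		/* drain readers already inside */
	printf("all rcu readers drained; safe to update queue_hw_ctx\n");

	pthread_join(t, NULL);
	rcu_unregister_thread();
	return 0;
}

Depending on scheduling, the reader either bails out on the frozen flag or
finishes its iteration before synchronize_rcu() returns; in neither case can
the updater's modification overlap the reader's critical section, which is
exactly the property the patch needs for nr_hw_queues/queue_hw_ctx.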