nvme: Use BLK_MQ_S_STOPPED instead of QUEUE_FLAG_STOPPED in blk-mq code
Make nvme_requeue_req() check BLK_MQ_S_STOPPED instead of QUEUE_FLAG_STOPPED. Remove the QUEUE_FLAG_STOPPED manipulations that became superfluous because of this change. Change blk_queue_stopped() tests into blk_mq_queue_stopped().

This patch fixes a race condition: using queue_flag_clear_unlocked() is not safe if any other function that manipulates the queue flags can be called concurrently, e.g. blk_cleanup_queue().

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit a6eaa8849f
parent 3174dd33fa
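The race the commit message describes comes down to a lost update on the queue flags word: queue_flag_clear_unlocked() performs a plain read-modify-write, so a flag that blk_cleanup_queue() sets under queue_lock at the same time can be silently overwritten. Below is a minimal user-space sketch of that interleaving; the flag names are purely illustrative and the interleaving is forced explicitly rather than left to the scheduler, so this is a model of the problem, not kernel code.

/*
 * Toy user-space model (not kernel code) of the lost-update race that an
 * unlocked, non-atomic clear of a shared flags word allows. Context A
 * stands in for queue_flag_clear_unlocked(); context B stands in for a
 * concurrent, locked flag update such as the ones done by
 * blk_cleanup_queue(). FLAG_STOPPED and FLAG_DYING are illustrative names.
 */
#include <stdio.h>

#define FLAG_STOPPED	(1UL << 0)	/* stands in for QUEUE_FLAG_STOPPED */
#define FLAG_DYING	(1UL << 1)	/* stands in for a flag set elsewhere */

int main(void)
{
	unsigned long queue_flags = FLAG_STOPPED;

	/* Context A, step 1: read the whole flags word. */
	unsigned long snapshot = queue_flags;

	/* Context B: sets another flag, even while holding the queue lock. */
	queue_flags |= FLAG_DYING;

	/* Context A, step 2: write back the stale value with its bit cleared. */
	queue_flags = snapshot & ~FLAG_STOPPED;

	/* B's update has been lost despite B taking the lock. */
	printf("FLAG_DYING still set? %s\n",
	       (queue_flags & FLAG_DYING) ? "yes" : "no (lost update)");
	return 0;
}

Dropping the QUEUE_FLAG_STOPPED manipulations entirely, as this patch does, removes the unlocked writer and with it the race.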
@@ -201,13 +201,7 @@ fail:
 
 void nvme_requeue_req(struct request *req)
 {
-	unsigned long flags;
-
-	blk_mq_requeue_request(req, false);
-	spin_lock_irqsave(req->q->queue_lock, flags);
-	if (!blk_queue_stopped(req->q))
-		blk_mq_kick_requeue_list(req->q);
-	spin_unlock_irqrestore(req->q->queue_lock, flags);
+	blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));
 }
 EXPORT_SYMBOL_GPL(nvme_requeue_req);
 
@@ -2076,13 +2070,8 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
 	struct nvme_ns *ns;
 
 	mutex_lock(&ctrl->namespaces_mutex);
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		spin_lock_irq(ns->queue->queue_lock);
-		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
-		spin_unlock_irq(ns->queue->queue_lock);
-
+	list_for_each_entry(ns, &ctrl->namespaces, list)
 		blk_mq_quiesce_queue(ns->queue);
-	}
 	mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_queues);
@@ -2093,7 +2082,6 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 
 	mutex_lock(&ctrl->namespaces_mutex);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
 		blk_mq_start_stopped_hw_queues(ns->queue, true);
 		blk_mq_kick_requeue_list(ns->queue);
 	}
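For reference, a toy user-space model of the behaviour the new code relies on: nvme_requeue_req() only kicks the requeue list when the queue is not stopped, and nvme_start_queues() later both restarts the hardware queues and kicks the requeue list, so a request parked while the queue was stopped is dispatched on restart rather than lost. All names and the single-threaded structure below are illustrative, not the blk-mq implementation.

/*
 * Toy user-space model (not kernel code) of the stop/requeue/start contract.
 * A request requeued while the queue is stopped stays parked; restarting the
 * queue kicks the parked requests, so nothing is lost.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
	bool stopped;		/* stands in for BLK_MQ_S_STOPPED */
	int  requeued;		/* requests parked on the "requeue list" */
	int  dispatched;	/* requests handed back for dispatch */
};

/* stands in for blk_mq_kick_requeue_list(): drain parked requests */
static void kick_requeue_list(struct toy_queue *q)
{
	if (q->stopped)
		return;		/* a stopped queue must not dispatch */
	q->dispatched += q->requeued;
	q->requeued = 0;
}

/* models the new nvme_requeue_req(): kick only if the queue is running */
static void requeue_req(struct toy_queue *q)
{
	q->requeued++;
	if (!q->stopped)
		kick_requeue_list(q);
}

static void stop_queue(struct toy_queue *q)	/* models nvme_stop_queues() */
{
	q->stopped = true;
}

static void start_queue(struct toy_queue *q)	/* models nvme_start_queues() */
{
	q->stopped = false;
	kick_requeue_list(q);	/* requests parked while stopped get dispatched */
}

int main(void)
{
	struct toy_queue q = { 0 };

	stop_queue(&q);
	requeue_req(&q);	/* parked, not dispatched */
	printf("while stopped: requeued=%d dispatched=%d\n",
	       q.requeued, q.dispatched);
	start_queue(&q);	/* restart kicks the parked request */
	printf("after start:   requeued=%d dispatched=%d\n",
	       q.requeued, q.dispatched);
	return 0;
}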