block, blk-mq: draining can't be skipped even if bypass_depth was non-zero
Currently, both blk_queue_bypass_start() and blk_mq_freeze_queue() skip queue draining if bypass_depth was already above zero. The assumption is that the instance which bumped bypass_depth should have performed draining already; however, nothing prevents a new instance of bypassing/freezing from starting before the previous one finishes draining. The current code may allow later bypassing/freezing instances to complete while there are still in-flight requests which haven't finished draining.

Fix it by draining regardless of bypass_depth. We still skip draining from blk_queue_bypass_start() while the queue is initializing, to avoid introducing excessive delays during boot. The INIT_DONE setting is moved above the initial blk_queue_bypass_end() so that bypassing attempts can't slip in between.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Nicholas A. Bellinger <nab@linux-iscsi.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 531ed6261e
commit 776687bce4
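For illustration, here is a minimal, self-contained C sketch of the race the commit message describes. It is not kernel code: struct fake_queue, bypass_start_old(), and bypass_start_new() are invented stand-ins, and setting in_flight to zero stands in for the actual drain.

/*
 * Hypothetical, userspace-only model of the race (names invented for
 * illustration; nothing here is a real kernel API).  In the old code,
 * only the 0 -> 1 transition of bypass_depth triggered a drain, so a
 * second bypasser racing with the first could return while requests
 * were still in flight.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
	int bypass_depth;
	int in_flight;			/* requests not yet drained */
};

/* Old, racy pattern: only the bypasser that takes depth 0 -> 1 drains. */
static void bypass_start_old(struct fake_queue *q)
{
	bool drain = !q->bypass_depth++;

	if (drain)
		q->in_flight = 0;	/* stand-in for __blk_drain_queue() */
	/* else: return immediately, even if a prior drain is unfinished */
}

/* Fixed pattern: every bypasser drains, whatever the prior depth was. */
static void bypass_start_new(struct fake_queue *q)
{
	q->bypass_depth++;
	q->in_flight = 0;		/* drain unconditionally */
}

int main(void)
{
	/*
	 * Bypasser A has already bumped bypass_depth to 1 but has not
	 * finished draining (in_flight is still 3) when bypasser B runs.
	 */
	struct fake_queue q = { .bypass_depth = 1, .in_flight = 3 };

	bypass_start_old(&q);
	printf("old: B returns with %d in flight\n", q.in_flight); /* 3 */

	q = (struct fake_queue){ .bypass_depth = 1, .in_flight = 3 };
	bypass_start_new(&q);
	printf("new: B returns with %d in flight\n", q.in_flight); /* 0 */
	return 0;
}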
diff --git a/block/blk-core.c b/block/blk-core.c
@@ -438,14 +438,17 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
-	bool drain;
-
 	spin_lock_irq(q->queue_lock);
-	drain = !q->bypass_depth++;
+	q->bypass_depth++;
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	if (drain) {
+	/*
+	 * Queues start drained. Skip actual draining till init is
+	 * complete. This avoids lengthy delays during queue init which
+	 * can happen many times during boot.
+	 */
+	if (blk_queue_init_done(q)) {
 		spin_lock_irq(q->queue_lock);
 		__blk_drain_queue(q, false);
 		spin_unlock_irq(q->queue_lock);
diff --git a/block/blk-mq.c b/block/blk-mq.c
@@ -131,15 +131,12 @@ void blk_mq_drain_queue(struct request_queue *q)
  */
 static void blk_mq_freeze_queue(struct request_queue *q)
 {
-	bool drain;
-
 	spin_lock_irq(q->queue_lock);
-	drain = !q->bypass_depth++;
+	q->bypass_depth++;
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	if (drain)
-		blk_mq_drain_queue(q);
+	blk_mq_drain_queue(q);
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
@@ -554,8 +554,8 @@ int blk_register_queue(struct gendisk *disk)
 	 * Initialization must be complete by now. Finish the initial
 	 * bypass from queue allocation.
 	 */
-	blk_queue_bypass_end(q);
 	queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+	blk_queue_bypass_end(q);
 
 	ret = blk_trace_init_sysfs(dev);
 	if (ret)
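A note on the blk-sysfs.c hunk: blk_queue_bypass_start() now skips draining while INIT_DONE is unset, so the flag must become visible before the initial bypass from queue allocation ends. With the old order there was a window, sketched below (not real code), in which a fresh bypass could begin on a fully live queue and still skip the drain:

	/* old order in blk_register_queue(): */
	blk_queue_bypass_end(q);
	/*
	 * window: blk_queue_bypass_start() here sees
	 * !blk_queue_init_done(q) and skips draining a live queue
	 */
	queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);

	/* new order: */
	queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
	blk_queue_bypass_end(q);
	/* any later blk_queue_bypass_start() sees INIT_DONE and drains */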