diff --git a/block/blk-mq.c b/block/blk-mq.c
index b5633501f181..65340847b51e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -628,6 +628,9 @@ static void blk_mq_rq_timer(unsigned long priv)
 	};
 	int i;
 
+	if (blk_queue_enter(q, GFP_NOWAIT))
+		return;
+
 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
 
 	if (data.next_set) {
@@ -642,6 +645,7 @@ static void blk_mq_rq_timer(unsigned long priv)
 				blk_mq_tag_idle(hctx);
 		}
 	}
+	blk_queue_exit(q);
 }
 
 /*
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index aa40aa93381b..2bc03df554a6 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -134,6 +134,8 @@ void blk_rq_timed_out_timer(unsigned long data)
 	struct request *rq, *tmp;
 	int next_set = 0;
 
+	if (blk_queue_enter(q, GFP_NOWAIT))
+		return;
 	spin_lock_irqsave(q->queue_lock, flags);
 
 	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
@@ -143,6 +145,7 @@ void blk_rq_timed_out_timer(unsigned long data)
 		mod_timer(&q->timeout, round_jiffies_up(next));
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_queue_exit(q);
 }
 
 /**
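
The pattern applied in both hunks is the same: the timeout handler first takes a queue
usage reference and simply returns if the queue is already being torn down, so it never
walks request state that teardown may be freeing, and it drops the reference again when
the scan is done. Below is a minimal user-space sketch of that guard pattern, for
illustration only: toy_queue, toy_queue_enter() and toy_queue_exit() are hypothetical
stand-ins for the request_queue, blk_queue_enter() and blk_queue_exit(), not kernel API,
and unlike the real blk_queue_enter() (which returns 0 on success) the toy enter helper
returns a bool.

/*
 * Sketch of the enter/exit guard around a timeout scan. A periodic
 * worker takes a usage reference before touching shared state and
 * bails out if teardown has already started.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
	atomic_int usage;   /* in-flight users; stands in for the queue usage counter */
	atomic_bool dying;  /* set once teardown begins */
};

/* Try to take a usage reference; fail fast instead of sleeping. */
static bool toy_queue_enter(struct toy_queue *q)
{
	atomic_fetch_add(&q->usage, 1);
	if (atomic_load(&q->dying)) {
		/* Raced with teardown: back out and report failure. */
		atomic_fetch_sub(&q->usage, 1);
		return false;
	}
	return true;
}

static void toy_queue_exit(struct toy_queue *q)
{
	atomic_fetch_sub(&q->usage, 1);
}

/* Timer callback shaped like the patched blk_mq_rq_timer(). */
static void toy_timeout_handler(struct toy_queue *q)
{
	if (!toy_queue_enter(q))
		return; /* queue is going away; don't touch its requests */

	/* ... scan the timeout list for expired requests here ... */
	puts("scanned timeout list");

	toy_queue_exit(q);
}

int main(void)
{
	struct toy_queue q;

	atomic_init(&q.usage, 0);
	atomic_init(&q.dying, false);

	toy_timeout_handler(&q);       /* runs: reference taken and released */
	atomic_store(&q.dying, true);  /* teardown begins */
	toy_timeout_handler(&q);       /* bails out before touching the queue */
	return 0;
}

GFP_NOWAIT in the patch matters for the same reason the toy enter helper never blocks:
the handlers run from timer context, so the enter path must fail immediately for a
frozen or dying queue rather than sleep waiting for it to become usable.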