blk-mq: pair blk_mq_start_request / blk_mq_requeue_request
Make sure we have a proper pairing between starting and requeueing requests: move the dma drain and REQ_END setup into blk_mq_start_request, and make sure blk_mq_requeue_request properly undoes them, giving us a pair of functions to prepare and unprepare a request without leaving side effects behind. Together this ensures we always clean up properly after BLK_MQ_RQ_QUEUE_BUSY returns from ->queue_rq.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 49f5baa510
parent 1e93b8c274
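To make the pairing the commit message describes concrete, here is a condensed sketch of the dispatch path after this patch, distilled from the hunks below. It is an illustration, not verbatim blk-mq.c: locking, the other ->queue_rq return codes, and how exactly the busy case re-lists the request are elided or assumed.

	rq = list_first_entry(&rq_list, struct request, queuelist);
	list_del_init(&rq->queuelist);

	/*
	 * Prepare: sets REQ_ATOM_STARTED, accounts the dma drain segment,
	 * and flags REQ_END when this is the last request of the series.
	 */
	blk_mq_start_request(rq, list_empty(&rq_list));

	ret = q->mq_ops->queue_rq(hctx, rq);
	if (ret == BLK_MQ_RQ_QUEUE_BUSY) {
		/*
		 * Unprepare: clears REQ_ATOM_STARTED and REQ_END and drops
		 * the drain segment again, so the request is back in its
		 * original state when it is prepared again on the next
		 * dispatch.
		 */
		blk_mq_requeue_request(rq);
		list_add(&rq->queuelist, &rq_list);	/* assumed re-list step */
	}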
@@ -377,7 +377,7 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
-static void blk_mq_start_request(struct request *rq)
+static void blk_mq_start_request(struct request *rq, bool last)
 {
 	struct request_queue *q = rq->q;
 
@@ -390,6 +390,25 @@ static void blk_mq_start_request(struct request *rq)
 	 */
 	rq->deadline = jiffies + q->rq_timeout;
 	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+	if (q->dma_drain_size && blk_rq_bytes(rq)) {
+		/*
+		 * Make sure space for the drain appears. We know we can do
+		 * this because max_hw_segments has been adjusted to be one
+		 * fewer than the device can handle.
+		 */
+		rq->nr_phys_segments++;
+	}
+
+	/*
+	 * Flag the last request in the series so that drivers know when IO
+	 * should be kicked off, if they don't do it on a per-request basis.
+	 *
+	 * Note: the flag isn't the only condition drivers should do kick off.
+	 * If drive is busy, the last request might not have the bit set.
+	 */
+	if (last)
+		rq->cmd_flags |= REQ_END;
 }
 
 static void blk_mq_requeue_request(struct request *rq)
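The driver side consumes the new flag. Below is a hypothetical sketch of a ->queue_rq implementation that batches hardware kicks; struct my_dev, my_dev_full(), queue_command(), and ring_doorbell() are invented stand-ins for driver-specific code, and only the REQ_END test and the return codes come from the interface shown here.

	static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
	{
		struct my_dev *dev = hctx->driver_data;	/* invented type */

		if (my_dev_full(dev))			/* invented helper */
			return BLK_MQ_RQ_QUEUE_BUSY;	/* triggers the requeue path */

		queue_command(dev, rq);			/* invented helper */

		/*
		 * Kick the hardware once the last request of the series
		 * arrives. Per the comment above, REQ_END isn't the only
		 * kick-off condition: a busy device may leave the bit unset.
		 */
		if (rq->cmd_flags & REQ_END)
			ring_doorbell(dev);		/* invented helper */

		return BLK_MQ_RQ_QUEUE_OK;
	}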
@@ -398,6 +417,11 @@ static void blk_mq_requeue_request(struct request *rq)
 
 	trace_block_rq_requeue(q, rq);
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+	rq->cmd_flags &= ~REQ_END;
+
+	if (q->dma_drain_size && blk_rq_bytes(rq))
+		rq->nr_phys_segments--;
 }
 
 struct blk_mq_timeout_data {
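Taken together, the two helpers are now exact inverses, which is the "without leaving side effects" property from the commit message. The intended invariant, stated as an illustrative comment rather than kernel code:

	/*
	 * Invariant after this patch:
	 *
	 *	blk_mq_start_request(rq, last);
	 *	blk_mq_requeue_request(rq);
	 *
	 * leaves rq as it was before the start: REQ_ATOM_STARTED cleared,
	 * REQ_END cleared, and nr_phys_segments back at its original value,
	 * because the dma drain segment is accounted and then dropped again.
	 */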
@@ -565,29 +589,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 
 		rq = list_first_entry(&rq_list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
-		blk_mq_start_request(rq);
 
-		if (q->dma_drain_size && blk_rq_bytes(rq)) {
-			/*
-			 * make sure space for the drain appears we
-			 * know we can do this because max_hw_segments
-			 * has been adjusted to be one fewer than the
-			 * device can handle
-			 */
-			rq->nr_phys_segments++;
-		}
-
-		/*
-		 * Last request in the series. Flag it as such, this
-		 * enables drivers to know when IO should be kicked off,
-		 * if they don't do it on a per-request basis.
-		 *
-		 * Note: the flag isn't the only condition drivers
-		 * should do kick off. If drive is busy, the last
-		 * request might not have the bit set.
-		 */
-		if (list_empty(&rq_list))
-			rq->cmd_flags |= REQ_END;
+		blk_mq_start_request(rq, list_empty(&rq_list));
 
 		ret = q->mq_ops->queue_rq(hctx, rq);
 		switch (ret) {