blk-mq: switch ->queue_rq return value to blk_status_t
Use the same values for request completion errors as for the return value from ->queue_rq. BLK_STS_RESOURCE is special-cased to cause a requeue, and all the others are completed as-is.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
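The new driver-facing contract can be summarised with a short sketch (a hypothetical minimal driver; the mydrv_* names are illustrative assumptions, not part of this commit):

/* Minimal ->queue_rq() under the new contract; mydrv_* is hypothetical. */
static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	/* Out of internal resources: blk-mq requeues the request later. */
	if (!mydrv_can_queue(hctx->queue->queuedata))
		return BLK_STS_RESOURCE;

	blk_mq_start_request(rq);

	/* Any non-RESOURCE error value is completed as-is by blk-mq. */
	if (mydrv_submit(rq) < 0)
		return BLK_STS_IOERR;

	return BLK_STS_OK;	/* request was queued */
}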
parent 2a842acab1
commit fc17b6534e
@@ -924,7 +924,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 {
 	struct blk_mq_hw_ctx *hctx;
 	struct request *rq;
-	int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
+	int errors, queued;
 
 	if (list_empty(list))
 		return false;
@@ -935,6 +935,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 	errors = queued = 0;
 	do {
 		struct blk_mq_queue_data bd;
+		blk_status_t ret;
 
 		rq = list_first_entry(list, struct request, queuelist);
 		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
@@ -975,25 +976,20 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 		}
 
 		ret = q->mq_ops->queue_rq(hctx, &bd);
-		switch (ret) {
-		case BLK_MQ_RQ_QUEUE_OK:
-			queued++;
-			break;
-		case BLK_MQ_RQ_QUEUE_BUSY:
+		if (ret == BLK_STS_RESOURCE) {
 			blk_mq_put_driver_tag_hctx(hctx, rq);
 			list_add(&rq->queuelist, list);
 			__blk_mq_requeue_request(rq);
 			break;
-		default:
-			pr_err("blk-mq: bad return on queue: %d\n", ret);
-		case BLK_MQ_RQ_QUEUE_ERROR:
-			errors++;
-			blk_mq_end_request(rq, BLK_STS_IOERR);
-			break;
 		}
 
-		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
-			break;
+		if (unlikely(ret != BLK_STS_OK)) {
+			errors++;
+			blk_mq_end_request(rq, BLK_STS_IOERR);
+			continue;
+		}
+
+		queued++;
 	} while (!list_empty(list));
 
 	hctx->dispatched[queued_to_index(queued)]++;
@@ -1031,7 +1027,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 	 * - blk_mq_run_hw_queue() checks whether or not a queue has
 	 *   been stopped before rerunning a queue.
 	 * - Some but not all block drivers stop a queue before
-	 *   returning BLK_MQ_RQ_QUEUE_BUSY. Two exceptions are scsi-mq
+	 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
 	 *   and dm-rq.
 	 */
 	if (!blk_mq_sched_needs_restart(hctx) &&
@@ -1410,7 +1406,7 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
 	};
 	struct blk_mq_hw_ctx *hctx;
 	blk_qc_t new_cookie;
-	int ret;
+	blk_status_t ret;
 
 	if (q->elevator)
 		goto insert;
@@ -1426,18 +1422,19 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
 	 * would have done
 	 */
 	ret = q->mq_ops->queue_rq(hctx, &bd);
-	if (ret == BLK_MQ_RQ_QUEUE_OK) {
+	switch (ret) {
+	case BLK_STS_OK:
 		*cookie = new_cookie;
 		return;
-	}
-
-	if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
+	case BLK_STS_RESOURCE:
+		__blk_mq_requeue_request(rq);
+		goto insert;
+	default:
 		*cookie = BLK_QC_T_NONE;
-		blk_mq_end_request(rq, BLK_STS_IOERR);
+		blk_mq_end_request(rq, ret);
 		return;
 	}
 
-	__blk_mq_requeue_request(rq);
 insert:
 	blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
 }
@@ -1674,7 +1674,7 @@ int loop_unregister_transfer(int number)
 EXPORT_SYMBOL(loop_register_transfer);
 EXPORT_SYMBOL(loop_unregister_transfer);
 
-static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -1683,7 +1683,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_mq_start_request(bd->rq);
 
 	if (lo->lo_state != Lo_bound)
-		return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_IOERR;
 
 	switch (req_op(cmd->rq)) {
 	case REQ_OP_FLUSH:
@@ -1698,7 +1698,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	kthread_queue_work(&lo->worker, &cmd->work);
 
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
 static void loop_handle_cmd(struct loop_cmd *cmd)
@@ -3633,8 +3633,8 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
 	return false;
 }
 
-static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
+static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
 		struct request *rq)
 {
 	struct driver_data *dd = hctx->queue->queuedata;
 	struct mtip_int_cmd *icmd = rq->special;
@@ -3642,7 +3642,7 @@ static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
 	struct mtip_cmd_sg *command_sg;
 
 	if (mtip_commands_active(dd->port))
-		return BLK_MQ_RQ_QUEUE_BUSY;
+		return BLK_STS_RESOURCE;
 
 	/* Populate the SG list */
 	cmd->command_header->opts =
@@ -3666,10 +3666,10 @@ static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
 
 	blk_mq_start_request(rq);
 	mtip_issue_non_ncq_command(dd->port, rq->tag);
-	return BLK_MQ_RQ_QUEUE_OK;
+	return 0;
 }
 
-static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
 			 const struct blk_mq_queue_data *bd)
 {
 	struct request *rq = bd->rq;
@@ -3681,15 +3681,14 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
 		return mtip_issue_reserved_cmd(hctx, rq);
 
 	if (unlikely(mtip_check_unal_depth(hctx, rq)))
-		return BLK_MQ_RQ_QUEUE_BUSY;
+		return BLK_STS_RESOURCE;
 
 	blk_mq_start_request(rq);
 
 	ret = mtip_submit_request(hctx, rq);
 	if (likely(!ret))
-		return BLK_MQ_RQ_QUEUE_OK;
-
-	return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_OK;
+	return BLK_STS_IOERR;
 }
 
 static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
@@ -469,7 +469,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 			nsock->pending = req;
 			nsock->sent = sent;
 		}
-		return BLK_MQ_RQ_QUEUE_BUSY;
+		return BLK_STS_RESOURCE;
 	}
 	dev_err_ratelimited(disk_to_dev(nbd->disk),
 		"Send control failed (result %d)\n", result);
@@ -510,7 +510,7 @@ send_pages:
 				 */
 				nsock->pending = req;
 				nsock->sent = sent;
-				return BLK_MQ_RQ_QUEUE_BUSY;
+				return BLK_STS_RESOURCE;
 			}
 			dev_err(disk_to_dev(nbd->disk),
 				"Send data failed (result %d)\n",
@@ -798,7 +798,7 @@ out:
 	return ret;
 }
 
-static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 			const struct blk_mq_queue_data *bd)
 {
 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -822,13 +822,9 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 	 * appropriate.
 	 */
 	ret = nbd_handle_cmd(cmd, hctx->queue_num);
-	if (ret < 0)
-		ret = BLK_MQ_RQ_QUEUE_ERROR;
-	if (!ret)
-		ret = BLK_MQ_RQ_QUEUE_OK;
 	complete(&cmd->send_complete);
 
-	return ret;
+	return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
 }
 
 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
@@ -356,7 +356,7 @@ static void null_request_fn(struct request_queue *q)
 	}
 }
 
-static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 			 const struct blk_mq_queue_data *bd)
 {
 	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -373,7 +373,7 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_mq_start_request(bd->rq);
 
 	null_handle_cmd(cmd);
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
@@ -4154,14 +4154,14 @@ err:
 	blk_mq_end_request(rq, errno_to_blk_status(result));
 }
 
-static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
 	struct request *rq = bd->rq;
 	struct work_struct *work = blk_mq_rq_to_pdu(rq);
 
 	queue_work(rbd_wq, work);
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
 static void rbd_free_disk(struct rbd_device *rbd_dev)
@@ -214,7 +214,7 @@ static void virtblk_done(struct virtqueue *vq)
 	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
 }
 
-static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 			   const struct blk_mq_queue_data *bd)
 {
 	struct virtio_blk *vblk = hctx->queue->queuedata;
@@ -246,7 +246,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 		break;
 	default:
 		WARN_ON_ONCE(1);
-		return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_IOERR;
 	}
 
 	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
@@ -276,8 +276,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 		/* Out of mem doesn't actually happen, since we fall back
 		 * to direct descriptors */
 		if (err == -ENOMEM || err == -ENOSPC)
-			return BLK_MQ_RQ_QUEUE_BUSY;
-		return BLK_MQ_RQ_QUEUE_ERROR;
+			return BLK_STS_RESOURCE;
+		return BLK_STS_IOERR;
 	}
 
 	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
@@ -286,7 +286,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	if (notify)
 		virtqueue_notify(vblk->vqs[qid].vq);
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
 /* return id (s/n) string for *disk to *id_str
@@ -881,7 +881,7 @@ static inline bool blkif_request_flush_invalid(struct request *req,
 					 !info->feature_fua));
 }
 
-static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 			  const struct blk_mq_queue_data *qd)
 {
 	unsigned long flags;
@@ -904,16 +904,16 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	flush_requests(rinfo);
 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 
 out_err:
 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
-	return BLK_MQ_RQ_QUEUE_ERROR;
+	return BLK_STS_IOERR;
 
 out_busy:
 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 	blk_mq_stop_hw_queue(hctx);
-	return BLK_MQ_RQ_QUEUE_BUSY;
+	return BLK_STS_RESOURCE;
 }
 
 static void blkif_complete_rq(struct request *rq)
@@ -727,7 +727,7 @@ static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
 	return __dm_rq_init_rq(set->driver_data, rq);
 }
 
-static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 			  const struct blk_mq_queue_data *bd)
 {
 	struct request *rq = bd->rq;
@@ -744,7 +744,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	}
 
 	if (ti->type->busy && ti->type->busy(ti))
-		return BLK_MQ_RQ_QUEUE_BUSY;
+		return BLK_STS_RESOURCE;
 
 	dm_start_request(md, rq);
 
@@ -762,10 +762,10 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 		rq_end_stats(md, rq);
 		rq_completed(md, rq_data_dir(rq), false);
 		blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
-		return BLK_MQ_RQ_QUEUE_BUSY;
+		return BLK_STS_RESOURCE;
 	}
 
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
 static const struct blk_mq_ops dm_mq_ops = {
@@ -316,7 +316,7 @@ static void ubiblock_do_work(struct work_struct *work)
 	blk_mq_end_request(req, errno_to_blk_status(ret));
 }
 
-static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
 			     const struct blk_mq_queue_data *bd)
 {
 	struct request *req = bd->rq;
@@ -327,9 +327,9 @@ static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
 	case REQ_OP_READ:
 		ubi_sgl_init(&pdu->usgl);
 		queue_work(dev->wq, &pdu->work);
-		return BLK_MQ_RQ_QUEUE_OK;
+		return BLK_STS_OK;
 	default:
-		return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_IOERR;
 	}
 
 }
@@ -283,7 +283,7 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
 	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
 }
 
-static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
+static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmnd)
 {
 	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
@@ -292,7 +292,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 
 	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
 	if (!range)
-		return BLK_MQ_RQ_QUEUE_BUSY;
+		return BLK_STS_RESOURCE;
 
 	__rq_for_each_bio(bio, req) {
 		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
@@ -306,7 +306,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 
 	if (WARN_ON_ONCE(n != segments)) {
 		kfree(range);
-		return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_IOERR;
 	}
 
 	memset(cmnd, 0, sizeof(*cmnd));
@@ -320,7 +320,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 	req->special_vec.bv_len = sizeof(*range) * segments;
 	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
 
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
 static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
@@ -364,10 +364,10 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 }
 
-int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd)
 {
-	int ret = BLK_MQ_RQ_QUEUE_OK;
+	blk_status_t ret = BLK_STS_OK;
 
 	if (!(req->rq_flags & RQF_DONTPREP)) {
 		nvme_req(req)->retries = 0;
@@ -394,7 +394,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		break;
 	default:
 		WARN_ON_ONCE(1);
-		return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_IOERR;
 	}
 
 	cmd->common.command_id = req->tag;
@@ -1873,7 +1873,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
  * level FC exchange resource that is also outstanding. This must be
  * considered in all cleanup operations.
  */
-static int
+static blk_status_t
 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	struct nvme_fc_fcp_op *op, u32 data_len,
 	enum nvmefc_fcp_datadir io_dir)
@@ -1888,10 +1888,10 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	 * the target device is present
 	 */
 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
-		return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_IOERR;
 
 	if (!nvme_fc_ctrl_get(ctrl))
-		return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_IOERR;
 
 	/* format the FC-NVME CMD IU and fcp_req */
 	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
@@ -1939,8 +1939,9 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 		if (ret < 0) {
 			nvme_cleanup_cmd(op->rq);
 			nvme_fc_ctrl_put(ctrl);
-			return (ret == -ENOMEM || ret == -EAGAIN) ?
-				BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+			if (ret == -ENOMEM || ret == -EAGAIN)
+				return BLK_STS_RESOURCE;
+			return BLK_STS_IOERR;
 		}
 	}
 
@@ -1966,19 +1967,19 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 		nvme_fc_ctrl_put(ctrl);
 
 		if (ret != -EBUSY)
-			return BLK_MQ_RQ_QUEUE_ERROR;
+			return BLK_STS_IOERR;
 
 		if (op->rq) {
 			blk_mq_stop_hw_queues(op->rq->q);
 			blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
 		}
-		return BLK_MQ_RQ_QUEUE_BUSY;
+		return BLK_STS_RESOURCE;
 	}
 
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
-static int
+static blk_status_t
 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 			const struct blk_mq_queue_data *bd)
 {
@@ -1991,7 +1992,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_command *sqe = &cmdiu->sqe;
 	enum nvmefc_fcp_datadir io_dir;
 	u32 data_len;
-	int ret;
+	blk_status_t ret;
 
 	ret = nvme_setup_cmd(ns, rq, sqe);
 	if (ret)
@@ -2046,7 +2047,7 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 	struct nvme_fc_fcp_op *aen_op;
 	unsigned long flags;
 	bool terminating = false;
-	int ret;
+	blk_status_t ret;
 
 	if (aer_idx > NVME_FC_NR_AEN_COMMANDS)
 		return;
@@ -296,7 +296,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
 		struct nvme_command *cmd, unsigned int flags, int qid);
-int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buf, unsigned bufflen);
@@ -427,7 +427,7 @@ static __le64 **iod_list(struct request *req)
 	return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
 }
 
-static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
+static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
 	int nseg = blk_rq_nr_phys_segments(rq);
@@ -436,7 +436,7 @@ static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
 		iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
 		if (!iod->sg)
-			return BLK_MQ_RQ_QUEUE_BUSY;
+			return BLK_STS_RESOURCE;
 	} else {
 		iod->sg = iod->inline_sg;
 	}
@@ -446,7 +446,7 @@ static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 	iod->nents = 0;
 	iod->length = size;
 
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
 static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
@@ -616,21 +616,21 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 	return true;
 }
 
-static int nvme_map_data(struct nvme_dev *dev, struct request *req,
+static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		struct nvme_command *cmnd)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct request_queue *q = req->q;
 	enum dma_data_direction dma_dir = rq_data_dir(req) ?
 			DMA_TO_DEVICE : DMA_FROM_DEVICE;
-	int ret = BLK_MQ_RQ_QUEUE_ERROR;
+	blk_status_t ret = BLK_STS_IOERR;
 
 	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
 	iod->nents = blk_rq_map_sg(q, req, iod->sg);
 	if (!iod->nents)
 		goto out;
 
-	ret = BLK_MQ_RQ_QUEUE_BUSY;
+	ret = BLK_STS_RESOURCE;
 	if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
 				DMA_ATTR_NO_WARN))
 		goto out;
@@ -638,7 +638,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
 	if (!nvme_setup_prps(dev, req))
 		goto out_unmap;
 
-	ret = BLK_MQ_RQ_QUEUE_ERROR;
+	ret = BLK_STS_IOERR;
 	if (blk_integrity_rq(req)) {
 		if (blk_rq_count_integrity_sg(q, req->bio) != 1)
 			goto out_unmap;
@@ -658,7 +658,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
 	cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
 	if (blk_integrity_rq(req))
 		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 
 out_unmap:
 	dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
@@ -688,7 +688,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 /*
  * NOTE: ns is NULL when called on the admin queue.
  */
-static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 			 const struct blk_mq_queue_data *bd)
 {
 	struct nvme_ns *ns = hctx->queue->queuedata;
@@ -696,7 +696,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_dev *dev = nvmeq->dev;
 	struct request *req = bd->rq;
 	struct nvme_command cmnd;
-	int ret = BLK_MQ_RQ_QUEUE_OK;
+	blk_status_t ret = BLK_STS_OK;
 
 	/*
 	 * If formated with metadata, require the block layer provide a buffer
@@ -705,38 +705,36 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	 */
 	if (ns && ns->ms && !blk_integrity_rq(req)) {
 		if (!(ns->pi_type && ns->ms == 8) &&
-		    !blk_rq_is_passthrough(req)) {
-			blk_mq_end_request(req, BLK_STS_NOTSUPP);
-			return BLK_MQ_RQ_QUEUE_OK;
-		}
+		    !blk_rq_is_passthrough(req))
+			return BLK_STS_NOTSUPP;
 	}
 
 	ret = nvme_setup_cmd(ns, req, &cmnd);
-	if (ret != BLK_MQ_RQ_QUEUE_OK)
+	if (ret)
 		return ret;
 
 	ret = nvme_init_iod(req, dev);
-	if (ret != BLK_MQ_RQ_QUEUE_OK)
+	if (ret)
 		goto out_free_cmd;
 
-	if (blk_rq_nr_phys_segments(req))
+	if (blk_rq_nr_phys_segments(req)) {
 		ret = nvme_map_data(dev, req, &cmnd);
-
-	if (ret != BLK_MQ_RQ_QUEUE_OK)
-		goto out_cleanup_iod;
+		if (ret)
+			goto out_cleanup_iod;
+	}
 
 	blk_mq_start_request(req);
 
 	spin_lock_irq(&nvmeq->q_lock);
 	if (unlikely(nvmeq->cq_vector < 0)) {
-		ret = BLK_MQ_RQ_QUEUE_ERROR;
+		ret = BLK_STS_IOERR;
 		spin_unlock_irq(&nvmeq->q_lock);
 		goto out_cleanup_iod;
 	}
 	__nvme_submit_cmd(nvmeq, &cmnd);
 	nvme_process_cq(nvmeq);
 	spin_unlock_irq(&nvmeq->q_lock);
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 out_cleanup_iod:
 	nvme_free_iod(dev, req);
 out_free_cmd:
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
|
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
|
||||||
const struct blk_mq_queue_data *bd)
|
const struct blk_mq_queue_data *bd)
|
||||||
{
|
{
|
||||||
struct nvme_ns *ns = hctx->queue->queuedata;
|
struct nvme_ns *ns = hctx->queue->queuedata;
|
||||||
@ -1459,27 +1459,28 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
|
|||||||
struct nvme_command *c = sqe->data;
|
struct nvme_command *c = sqe->data;
|
||||||
bool flush = false;
|
bool flush = false;
|
||||||
struct ib_device *dev;
|
struct ib_device *dev;
|
||||||
int ret;
|
blk_status_t ret;
|
||||||
|
int err;
|
||||||
|
|
||||||
WARN_ON_ONCE(rq->tag < 0);
|
WARN_ON_ONCE(rq->tag < 0);
|
||||||
|
|
||||||
if (!nvme_rdma_queue_is_ready(queue, rq))
|
if (!nvme_rdma_queue_is_ready(queue, rq))
|
||||||
return BLK_MQ_RQ_QUEUE_BUSY;
|
return BLK_STS_RESOURCE;
|
||||||
|
|
||||||
dev = queue->device->dev;
|
dev = queue->device->dev;
|
||||||
ib_dma_sync_single_for_cpu(dev, sqe->dma,
|
ib_dma_sync_single_for_cpu(dev, sqe->dma,
|
||||||
sizeof(struct nvme_command), DMA_TO_DEVICE);
|
sizeof(struct nvme_command), DMA_TO_DEVICE);
|
||||||
|
|
||||||
ret = nvme_setup_cmd(ns, rq, c);
|
ret = nvme_setup_cmd(ns, rq, c);
|
||||||
if (ret != BLK_MQ_RQ_QUEUE_OK)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
blk_mq_start_request(rq);
|
blk_mq_start_request(rq);
|
||||||
|
|
||||||
ret = nvme_rdma_map_data(queue, rq, c);
|
err = nvme_rdma_map_data(queue, rq, c);
|
||||||
if (ret < 0) {
|
if (err < 0) {
|
||||||
dev_err(queue->ctrl->ctrl.device,
|
dev_err(queue->ctrl->ctrl.device,
|
||||||
"Failed to map data (%d)\n", ret);
|
"Failed to map data (%d)\n", err);
|
||||||
nvme_cleanup_cmd(rq);
|
nvme_cleanup_cmd(rq);
|
||||||
goto err;
|
goto err;
|
||||||
}
|
}
|
||||||
@ -1489,17 +1490,18 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
|
|||||||
|
|
||||||
if (req_op(rq) == REQ_OP_FLUSH)
|
if (req_op(rq) == REQ_OP_FLUSH)
|
||||||
flush = true;
|
flush = true;
|
||||||
ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
|
err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
|
||||||
req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
|
req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
|
||||||
if (ret) {
|
if (err) {
|
||||||
nvme_rdma_unmap_data(queue, rq);
|
nvme_rdma_unmap_data(queue, rq);
|
||||||
goto err;
|
goto err;
|
||||||
}
|
}
|
||||||
|
|
||||||
return BLK_MQ_RQ_QUEUE_OK;
|
return BLK_STS_OK;
|
||||||
err:
|
err:
|
||||||
return (ret == -ENOMEM || ret == -EAGAIN) ?
|
if (err == -ENOMEM || err == -EAGAIN)
|
||||||
BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
|
return BLK_STS_RESOURCE;
|
||||||
|
return BLK_STS_IOERR;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
|
static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
|
||||||
|
@@ -159,17 +159,17 @@ nvme_loop_timeout(struct request *rq, bool reserved)
 	return BLK_EH_HANDLED;
 }
 
-static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
 	struct nvme_ns *ns = hctx->queue->queuedata;
 	struct nvme_loop_queue *queue = hctx->driver_data;
 	struct request *req = bd->rq;
 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
-	int ret;
+	blk_status_t ret;
 
 	ret = nvme_setup_cmd(ns, req, &iod->cmd);
-	if (ret != BLK_MQ_RQ_QUEUE_OK)
+	if (ret)
 		return ret;
 
 	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
@@ -179,16 +179,15 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		nvme_cleanup_cmd(req);
 		blk_mq_start_request(req);
 		nvme_loop_queue_response(&iod->req);
-		return BLK_MQ_RQ_QUEUE_OK;
+		return BLK_STS_OK;
 	}
 
 	if (blk_rq_bytes(req)) {
 		iod->sg_table.sgl = iod->first_sgl;
-		ret = sg_alloc_table_chained(&iod->sg_table,
+		if (sg_alloc_table_chained(&iod->sg_table,
 				blk_rq_nr_phys_segments(req),
-				iod->sg_table.sgl);
-		if (ret)
-			return BLK_MQ_RQ_QUEUE_BUSY;
+				iod->sg_table.sgl))
+			return BLK_STS_RESOURCE;
 
 		iod->req.sg = iod->sg_table.sgl;
 		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
@@ -197,7 +196,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_mq_start_request(req);
 
 	schedule_work(&iod->work);
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 }
 
 static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
@@ -1812,15 +1812,15 @@ out_delay:
 		blk_delay_queue(q, SCSI_QUEUE_DELAY);
 }
 
-static inline int prep_to_mq(int ret)
+static inline blk_status_t prep_to_mq(int ret)
 {
 	switch (ret) {
 	case BLKPREP_OK:
-		return BLK_MQ_RQ_QUEUE_OK;
+		return BLK_STS_OK;
 	case BLKPREP_DEFER:
-		return BLK_MQ_RQ_QUEUE_BUSY;
+		return BLK_STS_RESOURCE;
 	default:
-		return BLK_MQ_RQ_QUEUE_ERROR;
+		return BLK_STS_IOERR;
 	}
 }
 
@@ -1892,7 +1892,7 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
 	blk_mq_complete_request(cmd->request);
 }
 
-static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 			 const struct blk_mq_queue_data *bd)
 {
 	struct request *req = bd->rq;
@@ -1900,14 +1900,14 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct scsi_device *sdev = q->queuedata;
 	struct Scsi_Host *shost = sdev->host;
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-	int ret;
+	blk_status_t ret;
 	int reason;
 
 	ret = prep_to_mq(scsi_prep_state_check(sdev, req));
-	if (ret != BLK_MQ_RQ_QUEUE_OK)
+	if (ret != BLK_STS_OK)
 		goto out;
 
-	ret = BLK_MQ_RQ_QUEUE_BUSY;
+	ret = BLK_STS_RESOURCE;
 	if (!get_device(&sdev->sdev_gendev))
 		goto out;
 
@@ -1920,7 +1920,7 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	if (!(req->rq_flags & RQF_DONTPREP)) {
 		ret = prep_to_mq(scsi_mq_prep_fn(req));
-		if (ret != BLK_MQ_RQ_QUEUE_OK)
+		if (ret != BLK_STS_OK)
 			goto out_dec_host_busy;
 		req->rq_flags |= RQF_DONTPREP;
 	} else {
@@ -1938,11 +1938,11 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 	reason = scsi_dispatch_cmd(cmd);
 	if (reason) {
 		scsi_set_blocked(cmd, reason);
-		ret = BLK_MQ_RQ_QUEUE_BUSY;
+		ret = BLK_STS_RESOURCE;
 		goto out_dec_host_busy;
 	}
 
-	return BLK_MQ_RQ_QUEUE_OK;
+	return BLK_STS_OK;
 
 out_dec_host_busy:
 	atomic_dec(&shost->host_busy);
@@ -1955,12 +1955,14 @@ out_put_device:
 	put_device(&sdev->sdev_gendev);
 out:
 	switch (ret) {
-	case BLK_MQ_RQ_QUEUE_BUSY:
+	case BLK_STS_OK:
+		break;
+	case BLK_STS_RESOURCE:
 		if (atomic_read(&sdev->device_busy) == 0 &&
 		    !scsi_device_blocked(sdev))
 			blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
 		break;
-	case BLK_MQ_RQ_QUEUE_ERROR:
+	default:
 		/*
 		 * Make sure to release all allocated ressources when
 		 * we hit an error, as we will never see this command
@@ -1969,8 +1971,6 @@ out:
 		if (req->rq_flags & RQF_DONTPREP)
 			scsi_mq_uninit_cmd(cmd);
 		break;
-	default:
-		break;
 	}
 	return ret;
 }
@@ -87,7 +87,8 @@ struct blk_mq_queue_data {
 	bool last;
 };
 
-typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
+		const struct blk_mq_queue_data *);
 typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
@@ -155,10 +156,6 @@ struct blk_mq_ops {
 };
 
 enum {
-	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
-	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
-	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */
-
 	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
 	BLK_MQ_F_TAG_SHARED	= 1 << 1,
 	BLK_MQ_F_SG_MERGE	= 1 << 2,
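Taken together, the per-driver hunks above amount to a fixed mapping from the removed BLK_MQ_RQ_QUEUE_* constants to the shared status codes (a summary of the conversion, not an additional change in the patch):

/*
 * BLK_MQ_RQ_QUEUE_OK    -> BLK_STS_OK        (request was queued)
 * BLK_MQ_RQ_QUEUE_BUSY  -> BLK_STS_RESOURCE  (blk-mq requeues the request)
 * BLK_MQ_RQ_QUEUE_ERROR -> BLK_STS_IOERR     (request is completed with an error)
 */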