drivers: use req op accessor
The req operation REQ_OP is separated from the rq_flag_bits
definition. This converts the block layer drivers to use
req_op to get the op from the request struct.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit c2df40dfb8
parent d9d8c5c489
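The pattern applied throughout the diff below: drivers stop testing operation bits such as REQ_DISCARD in rq->cmd_flags and instead compare req_op(rq) against REQ_OP_* values, while modifier flags (REQ_FLUSH, REQ_FUA, REQ_SECURE) remain in cmd_flags. A minimal standalone C sketch of that split, with simplified stand-ins for the kernel types rather than the actual 4.8-era definitions:

/*
 * Sketch only -- simplified stand-ins, not the kernel's definitions.
 * The operation (what to do) lives in its own field and is read through
 * a req_op() accessor; modifier flags (how to do it) stay in cmd_flags.
 */
#include <stdbool.h>
#include <stdio.h>

enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD, REQ_OP_WRITE_SAME };

#define REQ_FLUSH	(1u << 0)	/* modifier flag, still a cmd_flags bit */

struct request {
	enum req_op op;			/* the operation itself */
	unsigned int cmd_flags;		/* only modifiers remain here */
};

static inline enum req_op req_op(const struct request *rq)
{
	return rq->op;
}

static inline bool op_is_write(enum req_op op)
{
	return op != REQ_OP_READ;
}

int main(void)
{
	struct request rq = { .op = REQ_OP_DISCARD, .cmd_flags = 0 };

	/* Old style tested a REQ_DISCARD bit; new style compares the op: */
	if (req_op(&rq) == REQ_OP_DISCARD)
		printf("discard request\n");
	if (op_is_write(req_op(&rq)) && (rq.cmd_flags & REQ_FLUSH))
		printf("write with flush\n");
	return 0;
}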
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -544,7 +544,7 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
 	if (op_is_write(req_op(rq))) {
 		if (rq->cmd_flags & REQ_FLUSH)
 			ret = lo_req_flush(lo, rq);
-		else if (rq->cmd_flags & REQ_DISCARD)
+		else if (req_op(rq) == REQ_OP_DISCARD)
 			ret = lo_discard(lo, rq, pos);
 		else if (lo->transfer)
 			ret = lo_write_transfer(lo, rq, pos);
@@ -1659,8 +1659,8 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (lo->lo_state != Lo_bound)
 		return -EIO;
 
-	if (lo->use_dio && !(cmd->rq->cmd_flags & (REQ_FLUSH |
-					REQ_DISCARD)))
+	if (lo->use_dio && (!(cmd->rq->cmd_flags & REQ_FLUSH) ||
+	     req_op(cmd->rq) == REQ_OP_DISCARD))
 		cmd->use_aio = true;
 	else
 		cmd->use_aio = false;
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3765,7 +3765,7 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
 			return -ENODATA;
 	}
 
-	if (rq->cmd_flags & REQ_DISCARD) {
+	if (req_op(rq) == REQ_OP_DISCARD) {
 		int err;
 
 		err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -282,7 +282,7 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
 
 	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
 		type = NBD_CMD_DISC;
-	else if (req->cmd_flags & REQ_DISCARD)
+	else if (req_op(req) == REQ_OP_DISCARD)
 		type = NBD_CMD_TRIM;
 	else if (req->cmd_flags & REQ_FLUSH)
 		type = NBD_CMD_FLUSH;
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3286,9 +3286,9 @@ static void rbd_queue_workfn(struct work_struct *work)
 		goto err;
 	}
 
-	if (rq->cmd_flags & REQ_DISCARD)
+	if (req_op(rq) == REQ_OP_DISCARD)
 		op_type = OBJ_OP_DISCARD;
-	else if (rq->cmd_flags & REQ_WRITE)
+	else if (req_op(rq) == REQ_OP_WRITE)
 		op_type = OBJ_OP_WRITE;
 	else
 		op_type = OBJ_OP_READ;
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -844,7 +844,8 @@ static int blkif_queue_request(struct request *req, struct blkfront_ring_info *r
 	if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
 		return 1;
 
-	if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE)))
+	if (unlikely(req_op(req) == REQ_OP_DISCARD ||
+		     req->cmd_flags & REQ_SECURE))
 		return blkif_queue_discard_req(req, rinfo);
 	else
 		return blkif_queue_rw_req(req, rinfo);
@@ -2054,8 +2055,9 @@ static int blkif_recover(struct blkfront_info *info)
 		/*
 		 * Get the bios in the request so we can re-queue them.
 		 */
-		if (copy[i].request->cmd_flags &
-		    (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+		if (copy[i].request->cmd_flags & REQ_FLUSH ||
+		    req_op(copy[i].request) == REQ_OP_DISCARD ||
+		    copy[i].request->cmd_flags & (REQ_FUA | REQ_SECURE)) {
 			/*
 			 * Flush operations don't contain bios, so
 			 * we need to requeue the whole request
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -206,7 +206,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
 	memcpy(rq->cmd, pc->c, 12);
 
 	pc->rq = rq;
-	if (rq->cmd_flags & REQ_WRITE)
+	if (cmd == WRITE)
 		pc->flags |= PC_FLAG_WRITING;
 
 	pc->flags |= PC_FLAG_DMA_OK;
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1322,7 +1322,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
 			r = rq_end_io(tio->ti, clone, error, &tio->info);
 	}
 
-	if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
+	if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) &&
 		     !clone->q->limits.max_write_same_sectors))
 		disable_write_same(tio->md);
 
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1722,8 +1722,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 		    !IS_ALIGNED(blk_rq_sectors(next), 8))
 			break;
 
-		if (next->cmd_flags & REQ_DISCARD ||
-		    next->cmd_flags & REQ_FLUSH)
+		if (req_op(next) == REQ_OP_DISCARD || next->cmd_flags & REQ_FLUSH)
 			break;
 
 		if (rq_data_dir(cur) != rq_data_dir(next))
@@ -2164,7 +2163,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	}
 
 	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
-	if (cmd_flags & REQ_DISCARD) {
+	if (req && req_op(req) == REQ_OP_DISCARD) {
 		/* complete ongoing async transfer before issuing discard */
 		if (card->host->areq)
 			mmc_blk_issue_rw_rq(mq, NULL);
@@ -2188,7 +2187,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
 out:
 	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
-	    (cmd_flags & MMC_REQ_SPECIAL_MASK))
+	    mmc_req_is_special(req))
 		/*
 		 * Release host when there are no more requests
 		 * and after special request(discard, flush) is done.
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -33,7 +33,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 	/*
 	 * We only like normal block requests and discards.
 	 */
-	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
+	if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) {
 		blk_dump_rq_flags(req, "MMC bad request");
 		return BLKPREP_KILL;
 	}
@@ -56,7 +56,6 @@ static int mmc_queue_thread(void *d)
 	down(&mq->thread_sem);
 	do {
 		struct request *req = NULL;
-		unsigned int cmd_flags = 0;
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -66,7 +65,6 @@ static int mmc_queue_thread(void *d)
 
 		if (req || mq->mqrq_prev->req) {
 			set_current_state(TASK_RUNNING);
-			cmd_flags = req ? req->cmd_flags : 0;
 			mq->issue_fn(mq, req);
 			cond_resched();
 			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
@@ -81,7 +79,7 @@ static int mmc_queue_thread(void *d)
 			 * has been finished. Do not assign it to previous
 			 * request.
 			 */
-			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
+			if (mmc_req_is_special(req))
 				mq->mqrq_cur->req = NULL;
 
 			mq->mqrq_prev->brq.mrq.data = NULL;
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -1,7 +1,10 @@
 #ifndef MMC_QUEUE_H
 #define MMC_QUEUE_H
 
-#define MMC_REQ_SPECIAL_MASK	(REQ_DISCARD | REQ_FLUSH)
+static inline bool mmc_req_is_special(struct request *req)
+{
+	return req && (req->cmd_flags & REQ_FLUSH || req_op(req) == REQ_OP_DISCARD);
+}
 
 struct request;
 struct task_struct;
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -94,7 +94,7 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
 	    get_capacity(req->rq_disk))
 		return -EIO;
 
-	if (req->cmd_flags & REQ_DISCARD)
+	if (req_op(req) == REQ_OP_DISCARD)
 		return tr->discard(dev, block, nsect);
 
 	if (rq_data_dir(req) == READ) {
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -292,7 +292,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		memcpy(cmd, req->cmd, sizeof(*cmd));
 	else if (req->cmd_flags & REQ_FLUSH)
 		nvme_setup_flush(ns, cmd);
-	else if (req->cmd_flags & REQ_DISCARD)
+	else if (req_op(req) == REQ_OP_DISCARD)
 		ret = nvme_setup_discard(ns, req, cmd);
 	else
 		nvme_setup_rw(ns, req, cmd);
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -177,7 +177,7 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
 
 static inline unsigned nvme_map_len(struct request *rq)
 {
-	if (rq->cmd_flags & REQ_DISCARD)
+	if (req_op(rq) == REQ_OP_DISCARD)
 		return sizeof(struct nvme_dsm_range);
 	else
 		return blk_rq_bytes(rq);
@@ -185,7 +185,7 @@ static inline unsigned nvme_map_len(struct request *rq)
 
 static inline void nvme_cleanup_cmd(struct request *req)
 {
-	if (req->cmd_flags & REQ_DISCARD)
+	if (req_op(req) == REQ_OP_DISCARD)
 		kfree(req->completion_data);
 }
 
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1012,7 +1012,8 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
 	} else if (rq_data_dir(rq) == READ) {
 		SCpnt->cmnd[0] = READ_6;
 	} else {
-		scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags);
+		scmd_printk(KERN_ERR, SCpnt, "Unknown command %d,%llx\n",
+			    req_op(rq), (unsigned long long) rq->cmd_flags);
 		goto out;
 	}
 
@@ -1137,21 +1138,27 @@ static int sd_init_command(struct scsi_cmnd *cmd)
 {
 	struct request *rq = cmd->request;
 
-	if (rq->cmd_flags & REQ_DISCARD)
+	switch (req_op(rq)) {
+	case REQ_OP_DISCARD:
 		return sd_setup_discard_cmnd(cmd);
-	else if (rq->cmd_flags & REQ_WRITE_SAME)
+	case REQ_OP_WRITE_SAME:
 		return sd_setup_write_same_cmnd(cmd);
-	else if (rq->cmd_flags & REQ_FLUSH)
-		return sd_setup_flush_cmnd(cmd);
-	else
-		return sd_setup_read_write_cmnd(cmd);
+	case REQ_OP_READ:
+	case REQ_OP_WRITE:
+		if (rq->cmd_flags & REQ_FLUSH)
+			return sd_setup_flush_cmnd(cmd);
+		else
+			return sd_setup_read_write_cmnd(cmd);
+	default:
+		BUG();
+	}
 }
 
 static void sd_uninit_command(struct scsi_cmnd *SCpnt)
 {
 	struct request *rq = SCpnt->request;
 
-	if (rq->cmd_flags & REQ_DISCARD)
+	if (req_op(rq) == REQ_OP_DISCARD)
 		__free_page(rq->completion_data);
 
 	if (SCpnt->cmnd != rq->cmd) {
@@ -1774,7 +1781,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 	unsigned char op = SCpnt->cmnd[0];
 	unsigned char unmap = SCpnt->cmnd[1] & 8;
 
-	if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) {
+	if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_SAME) {
 		if (!result) {
 			good_bytes = blk_rq_bytes(req);
 			scsi_set_resid(SCpnt, 0);