nvme: simplify nvme_setup_prps calling convention

Pass back a true/false value instead of a length that the caller has to
compare against the byte count of the request, and drop the pointless
gfp_t argument: the only caller always passes GFP_ATOMIC anyway.
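
At the call site this turns the byte-count comparison into a plain
success check (a simplified sketch of the nvme_queue_rq hunk below):

	/* before: success is inferred by comparing the returned length */
	if (blk_rq_bytes(req) !=
			nvme_setup_prps(dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) {
		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
		goto retry_cmd;
	}

	/* after: the function reports success or failure directly */
	if (!nvme_setup_prps(dev, iod, blk_rq_bytes(req))) {
		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
		goto retry_cmd;
	}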

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 69d2b57174
parent 1c63dc6658
Author: Christoph Hellwig <hch@lst.de>
Date:   2015-10-16 07:58:37 +02:00
Committer: Jens Axboe <axboe@fb.com>

@@ -709,9 +709,8 @@ release_iod:
 	blk_mq_complete_request(req, error);
 }
 
-/* length is in bytes. gfp flags indicates whether we may sleep. */
-static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
-		int total_len, gfp_t gfp)
+static bool nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
+		int total_len)
 {
 	struct dma_pool *pool;
 	int length = total_len;
@@ -727,7 +726,7 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
 
 	length -= (page_size - offset);
 	if (length <= 0)
-		return total_len;
+		return true;
 
 	dma_len -= (page_size - offset);
 	if (dma_len) {
@@ -740,7 +739,7 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
 
 	if (length <= page_size) {
 		iod->first_dma = dma_addr;
-		return total_len;
+		return true;
 	}
 
 	nprps = DIV_ROUND_UP(length, page_size);
@@ -752,11 +751,11 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
 		iod->npages = 1;
 	}
 
-	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
 	if (!prp_list) {
 		iod->first_dma = dma_addr;
 		iod->npages = -1;
-		return (total_len - length) + page_size;
+		return false;
 	}
 	list[0] = prp_list;
 	iod->first_dma = prp_dma;
@@ -764,9 +763,9 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
 	for (;;) {
 		if (i == page_size >> 3) {
 			__le64 *old_prp_list = prp_list;
-			prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
 			if (!prp_list)
-				return total_len - length;
+				return false;
 			list[iod->npages++] = prp_list;
 			prp_list[0] = old_prp_list[i - 1];
 			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -786,7 +785,7 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
 		dma_len = sg_dma_len(sg);
 	}
 
-	return total_len;
+	return true;
 }
 
 static void nvme_submit_priv(struct nvme_queue *nvmeq, struct request *req,
@@ -952,8 +951,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 		if (!dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir))
 			goto retry_cmd;
 
-		if (blk_rq_bytes(req) !=
-				nvme_setup_prps(dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) {
+		if (!nvme_setup_prps(dev, iod, blk_rq_bytes(req))) {
 			dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
 			goto retry_cmd;
 		}