block: remove rq_flush_dcache_pages
This function is trivial, and flush_dcache_page is always defined, so just open code it in the 2.5 callers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20211117061404.331732-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 79478bf9ea
commit 786d4e01c5
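For reference, the helper being removed did nothing beyond walking a request's segments and calling flush_dcache_page() on each page, which is exactly the loop this commit open-codes in the callers below. A self-contained sketch of that pattern (the wrapper name my_flush_rq is hypothetical and only for illustration; header choices can vary by kernel version):

	#include <linux/blk-mq.h>	/* struct request, rq_for_each_segment() */
	#include <asm/cacheflush.h>	/* flush_dcache_page() */

	/* Hypothetical stand-in for the removed rq_flush_dcache_pages(). */
	static void my_flush_rq(struct request *rq)
	{
		struct req_iterator iter;
		struct bio_vec bvec;

		/* Flush the D-cache for every page backing this request. */
		rq_for_each_segment(bvec, rq, iter)
			flush_dcache_page(bvec.bv_page);
	}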
diff --git a/block/blk-core.c b/block/blk-core.c
@@ -1300,25 +1300,6 @@ void blk_steal_bios(struct bio_list *list, struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_steal_bios);
 
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-/**
- * rq_flush_dcache_pages - Helper function to flush all pages in a request
- * @rq: the request to be flushed
- *
- * Description:
- *     Flush all pages in @rq.
- */
-void rq_flush_dcache_pages(struct request *rq)
-{
-	struct req_iterator iter;
-	struct bio_vec bvec;
-
-	rq_for_each_segment(bvec, rq, iter)
-		flush_dcache_page(bvec.bv_page);
-}
-EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
-#endif
-
 /**
  * blk_lld_busy - Check if underlying low-level drivers of a device are busy
  * @q : the queue of the device being checked
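The "flush_dcache_page is always defined" part of the rationale is what makes the unconditional open-coded loops safe: on architectures without D-cache aliasing, the generic header supplies an empty stub, so the calls compile to nothing. Roughly, and quoted from memory rather than from this commit (the exact form in include/asm-generic/cacheflush.h may differ between kernel versions):

	#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
	/* Generic fallback: no D-cache maintenance needed on this arch. */
	static inline void flush_dcache_page(struct page *page)
	{
	}
	#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
	#endif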
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
@@ -46,6 +46,8 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
 			       struct mtd_blktrans_dev *dev,
 			       struct request *req)
 {
+	struct req_iterator iter;
+	struct bio_vec bvec;
 	unsigned long block, nsect;
 	char *buf;
 
@@ -76,13 +78,17 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
 			}
 		}
 		kunmap(bio_page(req->bio));
-		rq_flush_dcache_pages(req);
+
+		rq_for_each_segment(bvec, req, iter)
+			flush_dcache_page(bvec.bv_page);
 		return BLK_STS_OK;
 	case REQ_OP_WRITE:
 		if (!tr->writesect)
 			return BLK_STS_IOERR;
 
-		rq_flush_dcache_pages(req);
+		rq_for_each_segment(bvec, req, iter)
+			flush_dcache_page(bvec.bv_page);
+
 		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
 		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
 			if (tr->writesect(dev, block, buf)) {
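The placement of the loops in do_blktrans_request() mirrors the old rq_flush_dcache_pages() calls: after the read path has filled the bio pages, and before the write path consumes them. As an explanatory aside (not part of the commit), this follows the usual flush_dcache_page() convention for pages that may also be mapped elsewhere; a condensed, commented view of the two call sites, with the surrounding sector loops elided:

	case REQ_OP_READ:
		/* tr->readsect() has just filled the bio pages via kmap() */
		kunmap(bio_page(req->bio));

		/* flush after writing the pages, so aliased mappings
		 * observe the freshly read data */
		rq_for_each_segment(bvec, req, iter)
			flush_dcache_page(bvec.bv_page);
		return BLK_STS_OK;
	case REQ_OP_WRITE:
		/* flush before reading the pages, so the driver sees
		 * data written through other mappings */
		rq_for_each_segment(bvec, req, iter)
			flush_dcache_page(bvec.bv_page);

		/* tr->writesect() will now consume the bio pages via kmap() */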
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
@@ -294,6 +294,8 @@ static void ubiblock_do_work(struct work_struct *work)
 	int ret;
 	struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
 	struct request *req = blk_mq_rq_from_pdu(pdu);
+	struct req_iterator iter;
+	struct bio_vec bvec;
 
 	blk_mq_start_request(req);
 
@@ -305,7 +307,9 @@ static void ubiblock_do_work(struct work_struct *work)
 	blk_rq_map_sg(req->q, req, pdu->usgl.sg);
 
 	ret = ubiblock_read(pdu);
-	rq_flush_dcache_pages(req);
+
+	rq_for_each_segment(bvec, req, iter)
+		flush_dcache_page(bvec.bv_page);
 
 	blk_mq_end_request(req, errno_to_blk_status(ret));
 }
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
@@ -1132,14 +1132,4 @@ static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
 }
 #endif /* CONFIG_BLK_DEV_ZONED */
 
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
-#endif
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-void rq_flush_dcache_pages(struct request *rq);
-#else
-static inline void rq_flush_dcache_pages(struct request *rq)
-{
-}
-#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
 #endif /* BLK_MQ_H */