blk-map: call blk_queue_bounce from blk_rq_append_bio
This moves the knowledge about bouncing out of the callers and into the block core (just as we do for the normal I/O path), and allows blk_queue_bounce to be unexported.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit caa4b02476
parent e442cbf910
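At the call sites this nets out to a simpler pattern. A minimal before/after sketch, condensed from the osd_initiator hunk below (error handling elided; q, rq, bio and ret stand for the usual locals):

	/* Before: each caller bounced by hand, through a temporary
	 * pointer, since blk_queue_bounce() may substitute a bounce bio. */
	struct bio *bounce_bio = bio;

	blk_queue_bounce(q, &bounce_bio);
	ret = blk_rq_append_bio(rq, bounce_bio);

	/* After: blk_rq_append_bio() takes care of bouncing internally. */
	ret = blk_rq_append_bio(rq, bio);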
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -16,6 +16,8 @@
  */
 int blk_rq_append_bio(struct request *rq, struct bio *bio)
 {
+	blk_queue_bounce(rq->q, &bio);
+
 	if (!rq->bio) {
 		blk_rq_bio_prep(rq->q, rq, bio);
 	} else {
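Note the &bio in the added call: blk_queue_bounce() takes a struct bio ** so it can swap a bounce clone in behind its caller's back. A hypothetical sketch of that pointer-substitution idiom (needs_bounce() and make_bounce_clone() are illustrative stand-ins, not real kernel helpers):

	static void bounce_if_needed(struct bio **bio_ptr)
	{
		struct bio *clone;

		if (!needs_bounce(*bio_ptr))		/* hypothetical check */
			return;

		clone = make_bounce_clone(*bio_ptr);	/* hypothetical clone */
		*bio_ptr = clone;	/* caller's pointer now names the clone */
	}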
@@ -72,15 +74,13 @@ static int __blk_rq_map_user_iov(struct request *rq,
 		map_data->offset += bio->bi_iter.bi_size;
 
 	orig_bio = bio;
-	blk_queue_bounce(q, &bio);
 
 	/*
 	 * We link the bounce buffer in and could have to traverse it
 	 * later so we have to get a ref to prevent it from being freed
 	 */
-	bio_get(bio);
-
 	ret = blk_rq_append_bio(rq, bio);
+	bio_get(bio);
 	if (ret) {
 		bio_endio(bio);
 		__blk_rq_unmap_user(orig_bio);
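The subtle part of this hunk is the ordering change: bio_get() now runs after blk_rq_append_bio() rather than before it, presumably to pair the reference (see the comment kept above) with the bio after the append/bounce step. The resulting sequence:

	ret = blk_rq_append_bio(rq, bio);	/* bouncing now happens in here */
	bio_get(bio);				/* ref so the bio is not freed under us */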
@@ -249,7 +249,6 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		return ret;
 	}
 
-	blk_queue_bounce(q, &rq->bio);
 	return 0;
 }
 EXPORT_SYMBOL(blk_rq_map_kern);
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -284,5 +284,3 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	 */
 	__blk_queue_bounce(q, bio_orig, pool);
 }
-
-EXPORT_SYMBOL(blk_queue_bounce);
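With the last outside caller gone (see the osd_initiator hunk below), blk_queue_bounce() no longer needs to be visible to modules, so its EXPORT_SYMBOL() goes away. For illustration, the general pattern (my_core_helper() and my_builtin_only() are hypothetical symbols):

	#include <linux/export.h>

	int my_core_helper(void)
	{
		return 0;
	}
	/* Exported: loadable modules can link against this symbol. */
	EXPORT_SYMBOL(my_core_helper);

	int my_builtin_only(void)
	{
		return 0;
	}
	/* No EXPORT_SYMBOL: reachable only from built-in code, which is
	 * blk_queue_bounce()'s situation after this patch. */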
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1576,10 +1576,7 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
 		return req;
 
 	for_each_bio(bio) {
-		struct bio *bounce_bio = bio;
-
-		blk_queue_bounce(req->q, &bounce_bio);
-		ret = blk_rq_append_bio(req, bounce_bio);
+		ret = blk_rq_append_bio(req, bio);
 		if (ret)
 			return ERR_PTR(ret);
 	}
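For reference, for_each_bio() just walks the singly linked bio chain via bi_next, so dropping the per-iteration bounce_bio temporary leaves a plain append loop. The macro (quoted from memory from that era's include/linux/bio.h):

	#define for_each_bio(_bio) \
		for (; _bio; _bio = _bio->bi_next)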