block: change the blk_queue_bounce calling convention
The double indirect bio leads to somewhat suboptimal code generation. Instead return the (original or split) bio, and make sure the request_queue arguments to the lower level helpers are passed after the bio to avoid constant reshuffling of the argument passing registers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20220727162300.3089193-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 51d798cdb5
parent 5a97806f7d
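The change boils down to replacing an in/out double pointer with a returned bio. Below is a minimal standalone sketch of the before/after call pattern; the struct definitions and helper names are simplified stand-ins for illustration, not the kernel's real declarations.

/*
 * Illustrative sketch only (simplified placeholder types, not the kernel's
 * struct bio / struct request_queue): the old in/out double-pointer
 * convention next to the new "return the original or replacement bio" one.
 */
#include <stdbool.h>

struct request_queue { bool may_bounce; };
struct bio { int id; };

static struct bio bounce_clone = { .id = 2 };   /* stands in for a bounce bio */

/* Old convention: the helper may replace *bio through the double pointer. */
static void bounce_old(struct request_queue *q, struct bio **bio)
{
        if (q->may_bounce)
                *bio = &bounce_clone;
}

/* New convention: the helper returns the original or the replacement bio. */
static struct bio *bounce_new(struct bio *bio, struct request_queue *q)
{
        if (q->may_bounce)
                return &bounce_clone;
        return bio;
}

static struct bio *submit(struct request_queue *q, struct bio *bio)
{
        bounce_old(q, &bio);            /* before: bio must be addressable, so it is spilled to the stack */
        bio = bounce_new(bio, q);       /* after: bio can stay in an argument register */
        return bio;
}

Because the caller no longer has to take the address of its local bio, the compiler can keep it in a register across the call, which is the code-generation improvement the commit message refers to.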
block/blk-mq.c

@@ -2815,7 +2815,7 @@ void blk_mq_submit_bio(struct bio *bio)
 	unsigned int nr_segs = 1;
 	blk_status_t ret;
 
-	blk_queue_bounce(q, &bio);
+	bio = blk_queue_bounce(bio, q);
 	if (bio_may_exceed_limits(bio, q))
 		bio = __bio_split_to_limits(bio, q, &nr_segs);
 
block/blk.h
@@ -378,7 +378,7 @@ static inline void blk_throtl_bio_endio(struct bio *bio) { }
 static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
 #endif
 
-void __blk_queue_bounce(struct request_queue *q, struct bio **bio);
+struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);
 
 static inline bool blk_queue_may_bounce(struct request_queue *q)
 {
@@ -387,10 +387,12 @@ static inline bool blk_queue_may_bounce(struct request_queue *q)
 		max_low_pfn >= max_pfn;
 }
 
-static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
+static inline struct bio *blk_queue_bounce(struct bio *bio,
+		struct request_queue *q)
 {
-	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
-		__blk_queue_bounce(q, bio);
+	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
+		return __blk_queue_bounce(bio, q);
+	return bio;
 }
 
 #ifdef CONFIG_BLK_CGROUP_IOLATENCY
block/bounce.c

@@ -199,24 +199,24 @@ err_put:
 	return NULL;
 }
 
-void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
+struct bio *__blk_queue_bounce(struct bio *bio_orig, struct request_queue *q)
 {
 	struct bio *bio;
-	int rw = bio_data_dir(*bio_orig);
+	int rw = bio_data_dir(bio_orig);
 	struct bio_vec *to, from;
 	struct bvec_iter iter;
 	unsigned i = 0, bytes = 0;
 	bool bounce = false;
 	int sectors;
 
-	bio_for_each_segment(from, *bio_orig, iter) {
+	bio_for_each_segment(from, bio_orig, iter) {
 		if (i++ < BIO_MAX_VECS)
 			bytes += from.bv_len;
 		if (PageHighMem(from.bv_page))
 			bounce = true;
 	}
 	if (!bounce)
-		return;
+		return bio_orig;
 
 	/*
 	 * Individual bvecs might not be logical block aligned. Round down
@@ -225,13 +225,13 @@ void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	 */
 	sectors = ALIGN_DOWN(bytes, queue_logical_block_size(q)) >>
 			SECTOR_SHIFT;
-	if (sectors < bio_sectors(*bio_orig)) {
-		bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
-		bio_chain(bio, *bio_orig);
-		submit_bio_noacct(*bio_orig);
-		*bio_orig = bio;
+	if (sectors < bio_sectors(bio_orig)) {
+		bio = bio_split(bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
+		bio_chain(bio, bio_orig);
+		submit_bio_noacct(bio_orig);
+		bio_orig = bio;
 	}
-	bio = bounce_clone_bio(*bio_orig);
+	bio = bounce_clone_bio(bio_orig);
 
 	/*
 	 * Bvec table can't be updated by bio_for_each_segment_all(),
@@ -254,7 +254,7 @@ void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 		to->bv_page = bounce_page;
 	}
 
-	trace_block_bio_bounce(*bio_orig);
+	trace_block_bio_bounce(bio_orig);
 
 	bio->bi_flags |= (1 << BIO_BOUNCED);
 
@@ -263,6 +263,6 @@ void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	else
 		bio->bi_end_io = bounce_end_io_write;
 
-	bio->bi_private = *bio_orig;
-	*bio_orig = bio;
+	bio->bi_private = bio_orig;
+	return bio;
 }