/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"
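
/*
 * Split a discard bio against the queue's discard limits: the front part
 * is capped at max_discard_sectors and, where possible, ends on a
 * discard_granularity boundary.  Returns the split-off front portion,
 * or NULL if the bio already fits.
 */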
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}
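
/*
 * Cap a REQ_OP_WRITE_ZEROES bio at the queue's max_write_zeroes_sectors
 * limit; the remainder is split off and returned, or NULL if it fits.
 */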
static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}
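
/*
 * Likewise for REQ_OP_WRITE_SAME: split against max_write_same_sectors.
 */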
static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}
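
/*
 * Return the maximum number of sectors a bio starting at bi_sector may
 * carry on this queue, rounded down so the split point stays aligned to
 * the logical block size.
 */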
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}
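
/*
 * Walk the bio's bvecs, accumulating physical segments until one of the
 * queue limits (max sectors, max segments, max segment size, SG gap or
 * segment boundary) would be exceeded, and split the bio at that point.
 * Also records the resulting segment count and front/back segment sizes.
 */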
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	unsigned front_seg_size = bio->bi_seg_front_size;
	bool do_split = true;
	struct bio *new = NULL;
	const unsigned max_sectors = get_max_io_size(q, bio);

	bio_for_each_segment(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < queue_max_segments(q) &&
			    sectors < max_sectors) {
				nsegs++;
				sectors = max_sectors;
			}
			if (sectors)
				goto split;
			/* Treat this single bvec as the 1st segment */
		}

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bvprv;
			sectors += bv.bv_len >> 9;

			if (nsegs == 1 && seg_size > front_seg_size)
				front_seg_size = seg_size;
			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		nsegs++;
		bvprv = bv;
		bvprvp = &bvprv;
		seg_size = bv.bv_len;
		sectors += bv.bv_len >> 9;

		if (nsegs == 1 && seg_size > front_seg_size)
			front_seg_size = seg_size;
	}

	do_split = false;
split:
	*segs = nsegs;

	if (do_split) {
		new = bio_split(bio, sectors, GFP_NOIO, bs);
		if (new)
			bio = new;
	}

	bio->bi_seg_front_size = front_seg_size;
	if (seg_size > bio->bi_seg_back_size)
		bio->bi_seg_back_size = seg_size;

	return do_split ? new : NULL;
}
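
/*
 * blk_queue_split() is the entry point used by the make_request path: if
 * *bio exceeds the queue limits, the oversized remainder is chained and
 * resubmitted via generic_make_request(), and *bio is updated to point at
 * the front piece the caller should continue with.
 */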
void blk_queue_split(struct request_queue *q, struct bio **bio)
{
	struct bio *split, *res;
	unsigned nsegs;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, q->bio_split, &nsegs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
		break;
	}

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);
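
/*
 * Count the physical segments in a bio chain, merging adjacent bvecs into
 * one segment when the queue supports clustering and the size,
 * physical-contiguity and boundary limits allow it.
 */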
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			(seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
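
/*
 * Return 1 if the last bvec of @bio and the first bvec of @nxt are
 * physically contiguous and may share one segment on this queue,
 * otherwise 0.
 */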
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_get_last_bvec(bio, &end_bv);
	bio_get_first_bvec(nxt, &nxt_bv);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}
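
/*
 * Map one bvec into the scatterlist: either fold it into the current sg
 * entry when clustering and the segment limits allow it, or start a new
 * entry.
 */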
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}
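
/*
 * Map a single bvec as one scatterlist entry; used for requests that
 * carry their data in a special payload or a single WRITE_SAME page.
 */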
static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = sglist;
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int cluster = blk_queue_cluster(q), nsegs = 0;

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * Map a request to a scatterlist and return the number of sg entries
 * set up. The caller must make sure the scatterlist can hold
 * rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of
	 * segments is bigger than the request's number of physical
	 * segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
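
/*
 * A minimal sketch of how a driver typically consumes blk_rq_map_sg()
 * when preparing a command for DMA (the cmd structure and its sglist
 * field are hypothetical; the helpers are the usual kernel APIs):
 *
 *	sg_init_table(cmd->sglist, queue_max_segments(q));
 *	nents = blk_rq_map_sg(q, rq, cmd->sglist);
 *	nents = dma_map_sg(dev, cmd->sglist, nents, dir);
 */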
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump
	 * the physical segment counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(q, req);
	return 0;
}
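
/*
 * ll_back_merge_fn() and ll_front_merge_fn() decide whether @bio may be
 * appended to or prepended to @req without violating the size, segment,
 * gap or integrity limits of the queue.
 */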
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload;
 * it does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check whether either of the requests is a re-queued
	 * request; we can't merge them if so.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(req->q, cpu, part);
		part_dec_in_flight(req->q, part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue-wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!q->mq_ops)
		lockdep_assert_held(q->queue_lock);

	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merging requests with different write hints, or a
	 * hinted request with non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return NULL;

	/*
	 * If failfast settings disagree or either of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}
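
/*
 * Ask the elevator whether @rq and @next may be merged; on success the
 * requests are merged and the now-empty @next is released.  Returns 1
 * if the merge happened, 0 otherwise.
 */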
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct elevator_queue *e = q->elevator;
	struct request *free;

	if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
		if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
			return 0;

	free = attempt_merge(q, rq, next);
	if (free) {
		__blk_put_request(q, free);
		return 1;
	}

	return 0;
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
		return false;

	/* only merge an integrity-protected bio into a likewise protected rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merging a request with a bio that has a different
	 * write hint, or a hinted request with non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	return true;
}
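
/*
 * Decide in which direction @bio could merge into @rq: a back merge if
 * the bio starts right after the request ends, a front merge if it ends
 * right where the request starts, or a discard merge when the queue
 * supports multi-range discards.
 */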
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (req_op(rq) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(rq->q) > 1)
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}