/*
 * Functions related to generic block layer helpers
 */
# include <linux/kernel.h>
# include <linux/module.h>
# include <linux/bio.h>
# include <linux/blkdev.h>
# include <linux/scatterlist.h>
# include "blk.h"
/* Shared completion state for a batch of bios submitted together. */
struct bio_batch {
atomic_t done ; /* bios still in flight, plus one held by the submitter */
unsigned long flags ; /* BIO_UPTODATE bit is cleared if any bio fails */
struct completion * wait ; /* completed when the last bio in the batch ends */
} ;
static void bio_batch_end_io ( struct bio * bio , int err )
2010-04-28 17:55:08 +04:00
{
2011-05-07 05:26:27 +04:00
struct bio_batch * bb = bio - > bi_private ;
2011-05-07 05:30:01 +04:00
if ( err & & ( err ! = - EOPNOTSUPP ) )
2011-05-07 05:26:27 +04:00
clear_bit ( BIO_UPTODATE , & bb - > flags ) ;
if ( atomic_dec_and_test ( & bb - > done ) )
complete ( bb - > wait ) ;
2010-04-28 17:55:08 +04:00
bio_put ( bio ) ;
}
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity.  Cap at UINT_MAX >> 9 so bi_size (bytes,
	 * unsigned int) cannot overflow.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	} else if (q->limits.discard_granularity) {
		unsigned int disc_sects = q->limits.discard_granularity >> 9;

		/*
		 * NOTE(review): this mask-based round-down assumes
		 * discard_granularity is a power of two -- confirm against
		 * the limits that drivers actually set.
		 */
		max_discard_sectors &= ~(disc_sects - 1);
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	/*
	 * Start "done" at 1: the extra count keeps the batch alive until
	 * every bio has been submitted; it is dropped before waiting below.
	 */
	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(type, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
2010-04-28 17:55:09 +04:00
/**
 * blkdev_issue_zeroout - generate number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Generate and issue number of bios with zero-filled pages.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	/* Extra count keeps the batch alive until submission finishes. */
	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		/* Pack as many zero pages into this bio as it will take. */
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			/* bio_add_page() returns the number of bytes added */
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;	/* bio full; submit it and start a new one */
		}
		ret = 0;	/* a short bio_add_page() is not an error */
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of bios in the batch was completed with error.*/
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);