/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	if (!rq->bio) {
		rq->cmd_flags &= REQ_OP_MASK;
		rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
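
/*
 * Illustrative sketch (not compiled, and not part of this file): how a
 * caller with two kernel buffers might rely on blk_rq_append_bio() to
 * attach a first bio and then append a second one to the same passthrough
 * request.  The function name and buffers below are hypothetical; real
 * callers normally go through blk_rq_map_kern()/blk_rq_map_user() instead.
 */
#if 0
static int example_append_two_buffers(struct request *rq,
				      void *buf1, unsigned int len1,
				      void *buf2, unsigned int len2,
				      gfp_t gfp_mask)
{
	struct bio *bio;
	int ret;

	/* First bio: the request is still empty, so it is simply attached. */
	bio = bio_map_kern(rq->q, buf1, len1, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);
	ret = blk_rq_append_bio(rq, bio);
	if (ret) {
		bio_put(bio);
		return ret;
	}

	/* Second bio: only accepted if ll_back_merge_fn() allows the merge. */
	bio = bio_map_kern(rq->q, buf2, len2, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);
	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		bio_put(bio);
	return ret;
}
#endif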

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
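
/*
 * Illustrative sketch (not compiled, not part of this file): roughly how an
 * SG_IO style passthrough path might map a user iovec into a request.  The
 * function name and locals are hypothetical and the command setup is
 * omitted; see block/scsi_ioctl.c for a real caller.
 */
#if 0
static int example_map_user_iovec(struct request_queue *q, struct request *rq,
				  const struct iovec __user *uvec,
				  unsigned int nr_segs)
{
	struct iovec *iov = NULL;
	struct iov_iter iter;
	struct bio *bio;
	int ret;

	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &iter);
	if (ret < 0)
		return ret;

	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
	kfree(iov);		/* import_iovec() may have allocated this */
	if (ret)
		return ret;

	/* Remember the original bio; completion may change rq->bio. */
	bio = rq->bio;

	/* ... set up and execute the passthrough command here ... */

	return blk_rq_unmap_user(bio);
}
#endif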

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
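
/*
 * Illustrative sketch (not compiled, not part of this file): the usual
 * blk_rq_map_user()/blk_rq_unmap_user() lifecycle for a single user buffer.
 * The function name is hypothetical and the passthrough command setup is
 * omitted; note that the bio pointer is saved before the request is issued,
 * because completion may have changed rq->bio by the time we unmap.
 */
#if 0
static int example_map_user_buffer(struct request_queue *q,
				   struct gendisk *disk,
				   void __user *ubuf, unsigned long len,
				   bool is_write)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, is_write ? WRITE : READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_put;

	bio = rq->bio;		/* original bio, needed for unmapping */

	/* ... fill in the passthrough command ... */
	blk_execute_rq(q, disk, rq, 0);

	ret = blk_rq_unmap_user(bio);
out_put:
	blk_put_request(rq);
	return ret;
}
#endif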

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
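
/*
 * Illustrative sketch (not compiled, not part of this file): mapping a
 * kernel buffer into a passthrough request, the pattern used by callers
 * such as the SCSI ioctl paths.  The function name is hypothetical and the
 * command setup is omitted.
 */
#if 0
static int example_map_kernel_buffer(struct request_queue *q,
				     struct gendisk *disk,
				     void *buffer, unsigned int bufflen,
				     bool is_write)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, is_write ? WRITE : READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Bounces automatically if the buffer is misaligned or on the stack. */
	ret = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_KERNEL);
	if (ret)
		goto out_put;

	/* ... fill in the passthrough command ... */
	blk_execute_rq(q, disk, rq, 0);

	/* No blk_rq_unmap_user() counterpart exists for kernel mappings. */
out_put:
	blk_put_request(rq);
	return ret;
}
#endif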