/*
* Functions related to mapping data to requests
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Return true if, with respect to the queue's virt_boundary mask, the
 * previous iovec does not end on the boundary or the current one does not
 * start on it, i.e. appending @cur after @prv would leave a gap the
 * device cannot handle.
 */
static bool iovec_gap_to_prv(struct request_queue *q,
			     struct iovec *prv, struct iovec *cur)
{
	unsigned long prev_end;

	if (!queue_virt_boundary(q))
		return false;

	if (prv->iov_base == NULL && prv->iov_len == 0)
		/* prv is not set - don't check */
		return false;

	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);

	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
		prev_end & queue_virt_boundary(q));
}

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio *bio;
	int unaligned = 0;
	struct iov_iter i;
	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};

	if (!iter || !iter->count)
		return -EINVAL;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;

		if (!iov.iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if ((uaddr & queue_dma_alignment(q)) ||
		    iovec_gap_to_prv(q, &prv, &iov))
			unaligned = 1;

		prv.iov_base = iov.iov_base;
		prv.iov_len = iov.iov_len;
	}

	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	if (bio->bi_iter.bi_size != iter->count) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
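
/*
 * Usage sketch (illustrative only, not code that is built as part of this
 * file): an SG_IO-style caller that has already constructed an iov_iter
 * over a user scatter list could map it into a packet-command request
 * roughly as follows.  The request setup shown here and the names q, iter
 * and err are assumptions about the caller, not something blk-map provides:
 *
 *	struct request *rq;
 *	struct bio *bio;
 *	int err;
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	blk_rq_set_block_pc(rq);
 *
 *	err = blk_rq_map_user_iov(q, rq, NULL, iter, GFP_KERNEL);
 *	if (err) {
 *		blk_put_request(rq);
 *		return err;
 *	}
 *	(save the original bio for blk_rq_unmap_user(), see below)
 *	bio = rq->bio;
 *
 * Passing a NULL rq_map_data as above leaves the copy vs. zero-copy
 * decision to the alignment checks in this function; a non-NULL map_data
 * forces bio_copy_user_iov() with the caller-provided pages.
 */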

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
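
/*
 * Continuing the sketch following blk_rq_map_user_iov() above (still
 * illustrative, with q, disk, rq and the saved bio pointer assumed): the
 * original rq->bio has to be captured before the request is executed,
 * because I/O completion may have changed rq->bio by the time the caller
 * gets around to unmapping:
 *
 *	blk_execute_rq(q, disk, rq, 0);
 *
 *	err = blk_rq_unmap_user(bio);
 *	blk_put_request(rq);
 *	return err;
 */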

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
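
/*
 * Usage sketch (illustrative only): a driver issuing a packet command that
 * reads into a kernel buffer, with rq and q set up as in the sketches
 * above and len chosen by the caller, could use blk_rq_map_kern() roughly
 * like this:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	int err;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
 *	if (err) {
 *		kfree(buf);
 *		return err;
 *	}
 *	blk_execute_rq(q, NULL, rq, 0);
 *
 * A buffer that fails blk_rq_aligned() or lives on the stack is bounced
 * through bio_copy_kern() by the do_copy path above, so callers do not
 * have to special-case such buffers themselves.
 */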