/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_size;
	}

	return 0;
}
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, uaddr, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}
/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:	the user buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;

	if (!ubuf && (!map_data || !map_data->null_mapped))
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;

		if (map_data)
			map_data->offset += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
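
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * caller is an SG_IO-style passthrough path. The names q, disk, ubuf,
 * len and writing are placeholders supplied by the caller, and command
 * setup plus error handling are trimmed. Note that rq->bio is saved
 * before the request is executed, because completion may change it:
 *
 *	struct request *rq;
 *	struct bio *bio;
 *	int err;
 *
 *	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	err = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (!err) {
 *		bio = rq->bio;
 *		blk_execute_rq(q, disk, rq, 0);
 *		err = blk_rq_unmap_user(bio);
 *	}
 *	blk_put_request(rq);
 */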

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long) iov[i].iov_base;

		if (!iov[i].iov_len)
			return -EINVAL;

		if (uaddr & queue_dma_alignment(q)) {
			unaligned = 1;
			break;
		}
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
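
/*
 * Usage sketch (illustrative only, not part of this file): mapping a
 * scattered user buffer, e.g. the iovec of an SG_IO request that has
 * iovec_count set. The names q, rq, iov, nr_segs and total_len are
 * placeholders; total_len must equal the sum of the iovec lengths or
 * the mapping is torn down again and -EINVAL is returned:
 *
 *	err = blk_rq_map_user_iov(q, rq, NULL, iov, nr_segs, total_len,
 *				  GFP_KERNEL);
 */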

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
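
/*
 * Note on use (illustrative, not part of this file): the bio passed in
 * must be the rq->bio value observed right after the mapping call, not
 * whatever rq->bio points at after completion. A minimal sketch, with
 * q, disk and rq as placeholders:
 *
 *	struct bio *bio = rq->bio;	 (saved immediately after mapping)
 *
 *	blk_execute_rq(q, disk, rq, 0);
 *	blk_rq_unmap_user(bio);		 (copies back and/or unpins pages)
 */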

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	rq->buffer = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
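
/*
 * Usage sketch (illustrative only, not part of this file): attaching a
 * kernel buffer to a passthrough request, as a SCSI command helper
 * might do. The names q, disk, buffer, bufflen, writing and err are
 * placeholders; GFP_NOIO is chosen here because such helpers often run
 * on the I/O submission path:
 *
 *	rq = blk_get_request(q, writing ? WRITE : READ, GFP_NOIO);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	err = 0;
 *	if (bufflen)
 *		err = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_NOIO);
 *	if (!err)
 *		blk_execute_rq(q, disk, rq, 1);
 *	blk_put_request(rq);
 */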