#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Macros for iterating over an iov_iter:
 *
 * iterate_all_kinds(iter, size, ident, step_iovec, step_bvec) iterates
 * through the ranges covered by iter (up to size bytes total), repeating
 * step_iovec or step_bvec for each of those.  ident is declared in the
 * expansion of that thing, either as struct iovec or struct bvec, and it
 * contains the range we are currently looking at.  step_bvec should be a
 * void expression, step_iovec - a size_t one, with non-zero meaning "stop
 * here, that many bytes from this range left".  In the end, the amount
 * actually handled is stored in size.
 */
/*
 * Walk the iovec segments of i, processing up to n bytes total.  STEP is
 * evaluated with __v describing the current range; its (size_t) result is
 * the number of bytes of that range left unprocessed — non-zero stops the
 * walk.  On exit n holds the number of bytes actually handled, and skip
 * the offset into the segment __p points at.
 */
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}
/*
 * Walk the bio_vec segments of i, processing up to n bytes total.  STEP
 * is a void expression evaluated with __v describing the current range;
 * unlike the iovec case it cannot fail partway, so the full n bytes are
 * always consumed.
 */
#define iterate_bvec(i, n, __v, __p, skip, STEP) {		\
	size_t wanted = n;					\
	__p = i->bvec;						\
	__v.bv_len = min_t(size_t, n, __p->bv_len - skip);	\
	if (likely(__v.bv_len)) {				\
		__v.bv_page = __p->bv_page;			\
		__v.bv_offset = __p->bv_offset + skip;		\
		(void)(STEP);					\
		skip += __v.bv_len;				\
		n -= __v.bv_len;				\
	}							\
	while (unlikely(n)) {					\
		__p++;						\
		__v.bv_len = min_t(size_t, n, __p->bv_len);	\
		if (unlikely(!__v.bv_len))			\
			continue;				\
		__v.bv_page = __p->bv_page;			\
		__v.bv_offset = __p->bv_offset;			\
		(void)(STEP);					\
		skip = __v.bv_len;				\
		n -= __v.bv_len;				\
	}							\
	n = wanted;						\
}
/*
 * Dispatch on the iterator flavour: run B over bio_vecs or I over iovecs.
 * v is declared here as the matching range type for the step expression.
 */
#define iterate_all_kinds(i, n, v, I, B) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		const struct bio_vec *bvec;		\
		struct bio_vec v;			\
		iterate_bvec(i, n, v, bvec, skip, (B))	\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
	}						\
}
/*
 * Like iterate_all_kinds(), but also advances the iterator past the n
 * bytes that were handled: segment pointer, nr_segs, count and
 * iov_offset are all updated.
 */
#define iterate_and_advance(i, n, v, I, B) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		const struct bio_vec *bvec;		\
		struct bio_vec v;			\
		iterate_bvec(i, n, v, bvec, skip, (B))	\
		if (skip == bvec->bv_len) {		\
			bvec++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= bvec - i->bvec;		\
		i->bvec = bvec;				\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
		if (skip == iov->iov_len) {		\
			iov++;				\
			skip = 0;			\
		}					\
		i->nr_segs -= iov - i->iov;		\
		i->iov = iov;				\
	}						\
	i->count -= n;					\
	i->iov_offset = skip;				\
}
2014-08-01 17:27:22 +04:00
static size_t copy_to_iter_iovec ( void * from , size_t bytes , struct iov_iter * i )
{
size_t skip , copy , left , wanted ;
const struct iovec * iov ;
char __user * buf ;
if ( unlikely ( bytes > i - > count ) )
bytes = i - > count ;
if ( unlikely ( ! bytes ) )
return 0 ;
wanted = bytes ;
iov = i - > iov ;
skip = i - > iov_offset ;
buf = iov - > iov_base + skip ;
copy = min ( bytes , iov - > iov_len - skip ) ;
left = __copy_to_user ( buf , from , copy ) ;
copy - = left ;
skip + = copy ;
from + = copy ;
bytes - = copy ;
while ( unlikely ( ! left & & bytes ) ) {
iov + + ;
buf = iov - > iov_base ;
copy = min ( bytes , iov - > iov_len ) ;
left = __copy_to_user ( buf , from , copy ) ;
copy - = left ;
skip = copy ;
from + = copy ;
bytes - = copy ;
}
if ( skip = = iov - > iov_len ) {
iov + + ;
skip = 0 ;
}
i - > count - = wanted - bytes ;
i - > nr_segs - = iov - i - > iov ;
i - > iov = iov ;
i - > iov_offset = skip ;
return wanted - bytes ;
}
/*
 * Copy up to @bytes from the iovec-backed iterator @i into kernel buffer
 * @to, advancing the iterator.  Returns the number of bytes actually
 * copied (short on user-space fault).
 */
static size_t copy_from_iter_iovec(void *to, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}

	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
2014-04-05 07:12:29 +04:00
static size_t copy_page_to_iter_iovec ( struct page * page , size_t offset , size_t bytes ,
2014-02-06 04:11:33 +04:00
struct iov_iter * i )
{
size_t skip , copy , left , wanted ;
const struct iovec * iov ;
char __user * buf ;
void * kaddr , * from ;
if ( unlikely ( bytes > i - > count ) )
bytes = i - > count ;
if ( unlikely ( ! bytes ) )
return 0 ;
wanted = bytes ;
iov = i - > iov ;
skip = i - > iov_offset ;
buf = iov - > iov_base + skip ;
copy = min ( bytes , iov - > iov_len - skip ) ;
if ( ! fault_in_pages_writeable ( buf , copy ) ) {
kaddr = kmap_atomic ( page ) ;
from = kaddr + offset ;
/* first chunk, usually the only one */
left = __copy_to_user_inatomic ( buf , from , copy ) ;
copy - = left ;
skip + = copy ;
from + = copy ;
bytes - = copy ;
while ( unlikely ( ! left & & bytes ) ) {
iov + + ;
buf = iov - > iov_base ;
copy = min ( bytes , iov - > iov_len ) ;
left = __copy_to_user_inatomic ( buf , from , copy ) ;
copy - = left ;
skip = copy ;
from + = copy ;
bytes - = copy ;
}
if ( likely ( ! bytes ) ) {
kunmap_atomic ( kaddr ) ;
goto done ;
}
offset = from - kaddr ;
buf + = copy ;
kunmap_atomic ( kaddr ) ;
copy = min ( bytes , iov - > iov_len - skip ) ;
}
/* Too bad - revert to non-atomic kmap */
kaddr = kmap ( page ) ;
from = kaddr + offset ;
left = __copy_to_user ( buf , from , copy ) ;
copy - = left ;
skip + = copy ;
from + = copy ;
bytes - = copy ;
while ( unlikely ( ! left & & bytes ) ) {
iov + + ;
buf = iov - > iov_base ;
copy = min ( bytes , iov - > iov_len ) ;
left = __copy_to_user ( buf , from , copy ) ;
copy - = left ;
skip = copy ;
from + = copy ;
bytes - = copy ;
}
kunmap ( page ) ;
done :
2014-04-05 03:23:46 +04:00
if ( skip = = iov - > iov_len ) {
iov + + ;
skip = 0 ;
}
2014-02-06 04:11:33 +04:00
i - > count - = wanted - bytes ;
i - > nr_segs - = iov - i - > iov ;
i - > iov = iov ;
i - > iov_offset = skip ;
return wanted - bytes ;
}
2014-04-05 07:12:29 +04:00
static size_t copy_page_from_iter_iovec ( struct page * page , size_t offset , size_t bytes ,
2014-04-03 23:05:18 +04:00
struct iov_iter * i )
{
size_t skip , copy , left , wanted ;
const struct iovec * iov ;
char __user * buf ;
void * kaddr , * to ;
if ( unlikely ( bytes > i - > count ) )
bytes = i - > count ;
if ( unlikely ( ! bytes ) )
return 0 ;
wanted = bytes ;
iov = i - > iov ;
skip = i - > iov_offset ;
buf = iov - > iov_base + skip ;
copy = min ( bytes , iov - > iov_len - skip ) ;
if ( ! fault_in_pages_readable ( buf , copy ) ) {
kaddr = kmap_atomic ( page ) ;
to = kaddr + offset ;
/* first chunk, usually the only one */
left = __copy_from_user_inatomic ( to , buf , copy ) ;
copy - = left ;
skip + = copy ;
to + = copy ;
bytes - = copy ;
while ( unlikely ( ! left & & bytes ) ) {
iov + + ;
buf = iov - > iov_base ;
copy = min ( bytes , iov - > iov_len ) ;
left = __copy_from_user_inatomic ( to , buf , copy ) ;
copy - = left ;
skip = copy ;
to + = copy ;
bytes - = copy ;
}
if ( likely ( ! bytes ) ) {
kunmap_atomic ( kaddr ) ;
goto done ;
}
offset = to - kaddr ;
buf + = copy ;
kunmap_atomic ( kaddr ) ;
copy = min ( bytes , iov - > iov_len - skip ) ;
}
/* Too bad - revert to non-atomic kmap */
kaddr = kmap ( page ) ;
to = kaddr + offset ;
left = __copy_from_user ( to , buf , copy ) ;
copy - = left ;
skip + = copy ;
to + = copy ;
bytes - = copy ;
while ( unlikely ( ! left & & bytes ) ) {
iov + + ;
buf = iov - > iov_base ;
copy = min ( bytes , iov - > iov_len ) ;
left = __copy_from_user ( to , buf , copy ) ;
copy - = left ;
skip = copy ;
to + = copy ;
bytes - = copy ;
}
kunmap ( page ) ;
done :
2014-04-05 03:23:46 +04:00
if ( skip = = iov - > iov_len ) {
iov + + ;
skip = 0 ;
}
2014-04-03 23:05:18 +04:00
i - > count - = wanted - bytes ;
i - > nr_segs - = iov - i - > iov ;
i - > iov = iov ;
i - > iov_offset = skip ;
return wanted - bytes ;
}
2014-08-01 17:27:22 +04:00
static size_t zero_iovec ( size_t bytes , struct iov_iter * i )
{
size_t skip , copy , left , wanted ;
const struct iovec * iov ;
char __user * buf ;
if ( unlikely ( bytes > i - > count ) )
bytes = i - > count ;
if ( unlikely ( ! bytes ) )
return 0 ;
wanted = bytes ;
iov = i - > iov ;
skip = i - > iov_offset ;
buf = iov - > iov_base + skip ;
copy = min ( bytes , iov - > iov_len - skip ) ;
left = __clear_user ( buf , copy ) ;
copy - = left ;
skip + = copy ;
bytes - = copy ;
while ( unlikely ( ! left & & bytes ) ) {
iov + + ;
buf = iov - > iov_base ;
copy = min ( bytes , iov - > iov_len ) ;
left = __clear_user ( buf , copy ) ;
copy - = left ;
skip = copy ;
bytes - = copy ;
}
if ( skip = = iov - > iov_len ) {
iov + + ;
skip = 0 ;
}
i - > count - = wanted - bytes ;
i - > nr_segs - = iov - i - > iov ;
i - > iov = iov ;
i - > iov_offset = skip ;
return wanted - bytes ;
}
2014-02-06 04:11:33 +04:00
/*
* Fault in the first iovec of the given iov_iter , to a maximum length
* of bytes . Returns 0 on success , or non - zero if the memory could not be
* accessed ( ie . because it is an invalid address ) .
*
* writev - intensive code may want this to prefault several iovecs - - that
* would be possible ( callers must not rely on the fact that _only_ the
* first iovec will be faulted with the current implementation ) .
*/
int iov_iter_fault_in_readable ( struct iov_iter * i , size_t bytes )
{
2014-04-05 07:12:29 +04:00
if ( ! ( i - > type & ITER_BVEC ) ) {
char __user * buf = i - > iov - > iov_base + i - > iov_offset ;
bytes = min ( bytes , i - > iov - > iov_len - i - > iov_offset ) ;
return fault_in_pages_readable ( buf , bytes ) ;
}
return 0 ;
2014-02-06 04:11:33 +04:00
}
EXPORT_SYMBOL ( iov_iter_fault_in_readable ) ;
2014-03-06 04:28:09 +04:00
void iov_iter_init ( struct iov_iter * i , int direction ,
const struct iovec * iov , unsigned long nr_segs ,
size_t count )
{
/* It will get better. Eventually... */
if ( segment_eq ( get_fs ( ) , KERNEL_DS ) )
2014-04-05 07:12:29 +04:00
direction | = ITER_KVEC ;
2014-03-06 04:28:09 +04:00
i - > type = direction ;
i - > iov = iov ;
i - > nr_segs = nr_segs ;
i - > iov_offset = 0 ;
i - > count = count ;
}
EXPORT_SYMBOL ( iov_iter_init ) ;
2014-03-15 12:05:57 +04:00
2014-04-05 07:12:29 +04:00
static ssize_t get_pages_alloc_iovec ( struct iov_iter * i ,
2014-03-21 12:58:33 +04:00
struct page * * * pages , size_t maxsize ,
size_t * start )
{
size_t offset = i - > iov_offset ;
const struct iovec * iov = i - > iov ;
size_t len ;
unsigned long addr ;
void * p ;
int n ;
int res ;
len = iov - > iov_len - offset ;
if ( len > i - > count )
len = i - > count ;
if ( len > maxsize )
len = maxsize ;
addr = ( unsigned long ) iov - > iov_base + offset ;
len + = * start = addr & ( PAGE_SIZE - 1 ) ;
addr & = ~ ( PAGE_SIZE - 1 ) ;
n = ( len + PAGE_SIZE - 1 ) / PAGE_SIZE ;
p = kmalloc ( n * sizeof ( struct page * ) , GFP_KERNEL ) ;
if ( ! p )
p = vmalloc ( n * sizeof ( struct page * ) ) ;
if ( ! p )
return - ENOMEM ;
res = get_user_pages_fast ( addr , n , ( i - > type & WRITE ) ! = WRITE , p ) ;
if ( unlikely ( res < 0 ) ) {
kvfree ( p ) ;
return res ;
}
* pages = p ;
return ( res = = n ? len : res * PAGE_SIZE ) - * start ;
}
2014-04-05 07:12:29 +04:00
static void memcpy_from_page ( char * to , struct page * page , size_t offset , size_t len )
{
char * from = kmap_atomic ( page ) ;
memcpy ( to , from + offset , len ) ;
kunmap_atomic ( from ) ;
}
/* Copy @len bytes from kernel buffer @from into @page at @offset. */
static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
	char *to = kmap_atomic(page);

	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}
2014-08-01 17:27:22 +04:00
static void memzero_page ( struct page * page , size_t offset , size_t len )
{
char * addr = kmap_atomic ( page ) ;
memset ( addr + offset , 0 , len ) ;
kunmap_atomic ( addr ) ;
}
/*
 * Copy up to @bytes from kernel buffer @from into the bvec-backed
 * iterator @i, advancing it.  Page copies cannot fault, so the full
 * amount (clamped to i->count) is always copied.
 */
static size_t copy_to_iter_bvec(void *from, size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;
	copy = min_t(size_t, bytes, bvec->bv_len - skip);

	memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
	skip += copy;
	from += copy;
	bytes -= copy;
	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, copy);
		skip = copy;
		from += copy;
		bytes -= copy;
	}

	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted - bytes;
}
2014-08-01 17:27:22 +04:00
static size_t copy_from_iter_bvec ( void * to , size_t bytes , struct iov_iter * i )
2014-04-05 07:12:29 +04:00
{
size_t skip , copy , wanted ;
const struct bio_vec * bvec ;
if ( unlikely ( bytes > i - > count ) )
bytes = i - > count ;
if ( unlikely ( ! bytes ) )
return 0 ;
wanted = bytes ;
bvec = i - > bvec ;
skip = i - > iov_offset ;
copy = min ( bytes , bvec - > bv_len - skip ) ;
memcpy_from_page ( to , bvec - > bv_page , bvec - > bv_offset + skip , copy ) ;
to + = copy ;
skip + = copy ;
bytes - = copy ;
while ( bytes ) {
bvec + + ;
copy = min ( bytes , ( size_t ) bvec - > bv_len ) ;
memcpy_from_page ( to , bvec - > bv_page , bvec - > bv_offset , copy ) ;
skip = copy ;
to + = copy ;
bytes - = copy ;
}
if ( skip = = bvec - > bv_len ) {
bvec + + ;
skip = 0 ;
}
i - > count - = wanted ;
i - > nr_segs - = bvec - i - > bvec ;
i - > bvec = bvec ;
i - > iov_offset = skip ;
return wanted ;
}
2014-08-01 17:27:22 +04:00
static size_t copy_page_to_iter_bvec ( struct page * page , size_t offset ,
size_t bytes , struct iov_iter * i )
{
void * kaddr = kmap_atomic ( page ) ;
size_t wanted = copy_to_iter_bvec ( kaddr + offset , bytes , i ) ;
kunmap_atomic ( kaddr ) ;
return wanted ;
}
/* Copy @bytes from the bvec-backed iterator @i into @page (at @offset). */
static size_t copy_page_from_iter_bvec(struct page *page, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	void *kaddr = kmap_atomic(page);
	size_t wanted = copy_from_iter_bvec(kaddr + offset, bytes, i);

	kunmap_atomic(kaddr);
	return wanted;
}
/*
 * Zero up to @bytes of the bvec-backed iterator @i, advancing it.
 * Cannot fault, so the full amount (clamped to i->count) is zeroed.
 */
static size_t zero_bvec(size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, wanted;
	const struct bio_vec *bvec;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	bvec = i->bvec;
	skip = i->iov_offset;
	copy = min_t(size_t, bytes, bvec->bv_len - skip);

	memzero_page(bvec->bv_page, skip + bvec->bv_offset, copy);
	skip += copy;
	bytes -= copy;
	while (bytes) {
		bvec++;
		copy = min(bytes, (size_t)bvec->bv_len);
		memzero_page(bvec->bv_page, bvec->bv_offset, copy);
		skip = copy;
		bytes -= copy;
	}

	if (skip == bvec->bv_len) {
		bvec++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
	i->iov_offset = skip;
	return wanted - bytes;
}
2014-04-05 07:12:29 +04:00
static ssize_t get_pages_alloc_bvec ( struct iov_iter * i ,
struct page * * * pages , size_t maxsize ,
size_t * start )
{
const struct bio_vec * bvec = i - > bvec ;
size_t len = bvec - > bv_len - i - > iov_offset ;
if ( len > i - > count )
len = i - > count ;
if ( len > maxsize )
len = maxsize ;
* start = bvec - > bv_offset + i - > iov_offset ;
* pages = kmalloc ( sizeof ( struct page * ) , GFP_KERNEL ) ;
if ( ! * pages )
return - ENOMEM ;
get_page ( * * pages = bvec - > bv_page ) ;
return len ;
}
/* Copy @bytes of @page (from @offset) into @i, whatever flavour it is. */
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_page_to_iter_bvec(page, offset, bytes, i);
	return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
/* Copy @bytes from @i into @page (at @offset), whatever flavour it is. */
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_page_from_iter_bvec(page, offset, bytes, i);
	return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);
2014-08-01 17:27:22 +04:00
size_t copy_to_iter ( void * addr , size_t bytes , struct iov_iter * i )
{
if ( i - > type & ITER_BVEC )
return copy_to_iter_bvec ( addr , bytes , i ) ;
else
return copy_to_iter_iovec ( addr , bytes , i ) ;
}
EXPORT_SYMBOL ( copy_to_iter ) ;
/* Copy @bytes from @i into kernel buffer @addr, whatever flavour it is. */
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return copy_from_iter_bvec(addr, bytes, i);
	return copy_from_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_from_iter);
/* Zero @bytes of @i, whatever flavour it is; returns bytes zeroed. */
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (i->type & ITER_BVEC)
		return zero_bvec(bytes, i);
	return zero_iovec(bytes, i);
}
EXPORT_SYMBOL(iov_iter_zero);
2014-04-05 07:12:29 +04:00
size_t iov_iter_copy_from_user_atomic ( struct page * page ,
struct iov_iter * i , unsigned long offset , size_t bytes )
{
iov_iter.c: macros for iterating over iov_iter
iterate_all_kinds(iter, size, ident, step_iovec, step_bvec)
iterates through the ranges covered by iter (up to size bytes total),
repeating step_iovec or step_bvec for each of those. ident is
declared in expansion of that thing, either as struct iovec or
struct bvec, and it contains the range we are currently looking
at. step_bvec should be a void expression, step_iovec - a size_t
one, with non-zero meaning "stop here, that many bytes from this
range left". In the end, the amount actually handled is stored
in size.
iov_iter_copy_from_user_atomic() and iov_iter_alignment() converted
to it.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
2014-11-27 21:51:41 +03:00
char * kaddr = kmap_atomic ( page ) , * p = kaddr + offset ;
iterate_all_kinds ( i , bytes , v ,
__copy_from_user_inatomic ( ( p + = v . iov_len ) - v . iov_len ,
v . iov_base , v . iov_len ) ,
memcpy_from_page ( ( p + = v . bv_len ) - v . bv_len , v . bv_page ,
v . bv_offset , v . bv_len )
)
kunmap_atomic ( kaddr ) ;
return bytes ;
2014-04-05 07:12:29 +04:00
}
EXPORT_SYMBOL ( iov_iter_copy_from_user_atomic ) ;
/* Advance @i by @size bytes without copying anything. */
void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
/*
* Return the count of just the current iov_iter segment .
*/
size_t iov_iter_single_seg_count ( const struct iov_iter * i )
{
if ( i - > nr_segs = = 1 )
return i - > count ;
else if ( i - > type & ITER_BVEC )
return min ( i - > count , i - > bvec - > bv_len - i - > iov_offset ) ;
2014-11-13 12:15:23 +03:00
else
return min ( i - > count , i - > iov - > iov_len - i - > iov_offset ) ;
2014-04-05 07:12:29 +04:00
}
EXPORT_SYMBOL ( iov_iter_single_seg_count ) ;
unsigned long iov_iter_alignment ( const struct iov_iter * i )
{
iov_iter.c: macros for iterating over iov_iter
iterate_all_kinds(iter, size, ident, step_iovec, step_bvec)
iterates through the ranges covered by iter (up to size bytes total),
repeating step_iovec or step_bvec for each of those. ident is
declared in expansion of that thing, either as struct iovec or
struct bvec, and it contains the range we are currently looking
at. step_bvec should be a void expression, step_iovec - a size_t
one, with non-zero meaning "stop here, that many bytes from this
range left". In the end, the amount actually handled is stored
in size.
iov_iter_copy_from_user_atomic() and iov_iter_alignment() converted
to it.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
2014-11-27 21:51:41 +03:00
unsigned long res = 0 ;
size_t size = i - > count ;
if ( ! size )
return 0 ;
iterate_all_kinds ( i , size , v ,
( res | = ( unsigned long ) v . iov_base | v . iov_len , 0 ) ,
res | = v . bv_offset | v . bv_len
)
return res ;
2014-04-05 07:12:29 +04:00
}
EXPORT_SYMBOL ( iov_iter_alignment ) ;
/*
 * Pin up to @maxpages pages backing the first range of @i into the
 * caller-supplied @pages array (at most @maxsize bytes).  *start
 * receives the offset into the first page.  Returns the byte length
 * covered, 0 for an empty iterator, or a negative errno.
 */
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
/* As iov_iter_get_pages(), but allocates the page array for the caller. */
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	if (i->type & ITER_BVEC)
		return get_pages_alloc_bvec(i, pages, maxsize, start);
	return get_pages_alloc_iovec(i, pages, maxsize, start);
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
int iov_iter_npages ( const struct iov_iter * i , int maxpages )
{
2014-11-27 22:09:46 +03:00
size_t size = i - > count ;
int npages = 0 ;
if ( ! size )
return 0 ;
iterate_all_kinds ( i , size , v , ( {
unsigned long p = ( unsigned long ) v . iov_base ;
npages + = DIV_ROUND_UP ( p + v . iov_len , PAGE_SIZE )
- p / PAGE_SIZE ;
if ( npages > = maxpages )
return maxpages ;
0 ; } ) , ( {
npages + + ;
if ( npages > = maxpages )
return maxpages ;
} )
)
return npages ;
2014-04-05 07:12:29 +04:00
}
2014-03-19 09:16:16 +04:00
EXPORT_SYMBOL ( iov_iter_npages ) ;