/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files, network, direct splicing, etc and
 * fixing lots of bugs.
 *
 * Copyright (C) 2005-2006 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
 *
 */
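
/*
 * Illustrative userspace sketch (not part of this file, descriptor names
 * are made up): the splice(2) syscall implemented below is typically used
 * to move data from a file into a pipe and on into a socket without
 * copying it through user memory.
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	ssize_t n = splice(file_fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
 *	splice(pfd[0], NULL, sock_fd, NULL, n, SPLICE_F_MORE);
 */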
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>

struct partial_page {
	unsigned int offset;
	unsigned int len;
};

/*
 * Passed to splice_to_pipe
 */
struct splice_pipe_desc {
	struct page **pages;		/* page map */
	struct partial_page *partial;	/* pages[] may not be contig */
	int nr_pages;			/* number of pages in map */
	unsigned int flags;		/* splice flags */
	struct pipe_buf_operations *ops;/* ops associated with output pipe */
};

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	lock_page(page);

	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate won't wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	if (!remove_mapping(mapping, page)) {
		unlock_page(page);
		return 1;
	}

	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
	return 0;
}

static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->page = NULL;
	buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
}

static void *page_cache_pipe_buf_map(struct file *file,
				     struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok after all, fall through to mapping.
		 */
		unlock_page(page);
	}

	return kmap(page);
error:
	unlock_page(page);
	return ERR_PTR(err);
}

static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
				      struct pipe_buffer *buf)
{
	kunmap(buf->page);
}

static void *user_page_pipe_buf_map(struct file *file,
				    struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	return kmap(buf->page);
}

static void user_page_pipe_buf_unmap(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	kunmap(buf->page);
}

static void page_cache_pipe_buf_get(struct pipe_inode_info *info,
				    struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}

static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = page_cache_pipe_buf_get,
};

static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	return 1;
}

static struct pipe_buf_operations user_page_pipe_buf_ops = {
	.can_merge = 0,
	.map = user_page_pipe_buf_map,
	.unmap = user_page_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = page_cache_pipe_buf_get,
};

/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
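/*
 * A note on the ring arithmetic used below (descriptive only): PIPE_BUFFERS
 * is a power of two, so the slot for the next buffer is found by masking
 * rather than a modulo,
 *
 *	newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
 *
 * where curbuf is the first occupied slot and nrbufs the number of
 * occupied slots.
 */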
static ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
			      struct splice_pipe_desc *spd)
{
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->ops = spd->ops;
			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->inode)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			break;
		}

		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

	while (page_nr < spd->nr_pages)
		page_cache_release(spd->pages[page_nr++]);

	return ret;
}

static int
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize;
	size_t total_len;
	int error;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
	};

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * Initiate read-ahead on this page range. However, don't call into
	 * read-ahead if this is a non-zero offset (we are likely doing small
	 * chunk splice and the page is already there) for a single page.
	 */
	if (!loff || spd.nr_pages > 1)
		do_page_cache_readahead(mapping, in, index, spd.nr_pages);

	/*
	 * Now fill in the holes:
	 */
	error = 0;
	total_len = 0;
	for (spd.nr_pages = 0; spd.nr_pages < nr_pages; spd.nr_pages++, index++) {
		unsigned int this_len;

		if (!len)
			break;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
find_page:
		/*
		 * lookup the page for this index
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * page didn't exist, allocate one
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						      mapping_gfp_mask(mapping));
			if (unlikely(error)) {
				page_cache_release(page);
				break;
			}

			goto readpage;
		}

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			/*
			 * If in nonblock mode then don't block on waiting
			 * for an in-flight io page
			 */
			if (flags & SPLICE_F_NONBLOCK)
				break;

			lock_page(page);

			/*
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			 * added
			 */
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				break;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

readpage:
			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);

			if (unlikely(error)) {
				page_cache_release(page);
				if (error == AOP_TRUNCATED_PAGE)
					goto find_page;
				break;
			}

			/*
			 * i_size must be checked after ->readpage().
			 */
			isize = i_size_read(mapping->host);
			end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
			if (unlikely(!isize || index > end_index)) {
				page_cache_release(page);
				break;
			}

			/*
			 * if this is the last page, see if we need to shrink
			 * the length and stop
			 */
			if (end_index == index) {
				loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
				if (total_len + loff > isize) {
					page_cache_release(page);
					break;
				}
				/*
				 * force quit after adding this page
				 */
				nr_pages = spd.nr_pages;
				this_len = min(this_len, loff);
				loff = 0;
			}
		}
fill_it:
		pages[spd.nr_pages] = page;
		partial[spd.nr_pages].offset = loff;
		partial[spd.nr_pages].len = this_len;
		len -= this_len;
		total_len += this_len;
		loff = 0;
	}

	if (spd.nr_pages)
		return splice_to_pipe(pipe, &spd);

	return error;
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe.
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	ssize_t spliced;
	int ret;

	ret = 0;
	spliced = 0;

	while (len) {
		ret = __generic_file_splice_read(in, ppos, pipe, len, flags);

		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}

		*ppos += ret;
		len -= ret;
		spliced += ret;
	}

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_read);
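
/*
 * Illustrative note (not taken from this file): filesystems whose reads go
 * through the page cache can typically wire this helper straight into their
 * file_operations, along the lines of
 *
 *	.splice_read	= generic_file_splice_read,
 *
 * so that splice(2) from one of their files needs no fs-specific code.
 */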

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *info,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->file;
	loff_t pos = sd->pos;
	ssize_t ret;
	void *ptr;
	int more;

	/*
	 * Sub-optimal, but we are limited by the pipe ->map. We don't
	 * need a kmap'ed buffer here, we just want to make sure we
	 * have the page pinned if the pipe page originates from the
	 * page cache.
	 */
	ptr = buf->ops->map(file, info, buf);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

	ret = file->f_op->sendpage(file, buf->page, buf->offset, sd->len,
				   &pos, more);

	buf->ops->unmap(info, buf);
	return ret;
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
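/*
 * In code terms (a summary of the function below, not additional
 * behaviour): try to steal the pipe page and add it to the page cache;
 * if that fails, find or allocate a destination page, bring it uptodate
 * when we won't overwrite it completely, then prepare_write(), memcpy
 * the data in unless the page was stolen, and commit_write().
 */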
static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->file;
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	unsigned int offset, this_len;
	struct page *page;
	pgoff_t index;
	char *src;
	int ret;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	src = buf->ops->map(file, info, buf);
	if (IS_ERR(src))
		return PTR_ERR(src);

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;

	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;

	/*
	 * Reuse buf page, if SPLICE_F_MOVE is set.
	 */
	if (sd->flags & SPLICE_F_MOVE) {
		/*
		 * If steal succeeds, buf->page is now pruned from the vm
		 * side (LRU and page cache) and we can reuse it. The page
		 * will also be locked on successful return.
		 */
		if (buf->ops->steal(info, buf))
			goto find_page;

		page = buf->page;
		if (add_to_page_cache(page, mapping, index, gfp_mask))
			goto find_page;

		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
			lru_cache_add(page);
	} else {
find_page:
		page = find_lock_page(mapping, index);
		if (!page) {
			ret = -ENOMEM;
			page = page_cache_alloc_cold(mapping);
			if (unlikely(!page))
				goto out_nomem;

			/*
			 * This will also lock the page
			 */
			ret = add_to_page_cache_lru(page, mapping, index,
						    gfp_mask);
			if (unlikely(ret))
				goto out;
		}

		/*
		 * We get here with the page locked. If the page is also
		 * uptodate, we don't need to do more. If it isn't, we
		 * may need to bring it in if we are not going to overwrite
		 * the full page.
		 */
		if (!PageUptodate(page)) {
			if (this_len < PAGE_CACHE_SIZE) {
				ret = mapping->a_ops->readpage(file, page);
				if (unlikely(ret))
					goto out;

				lock_page(page);

				if (!PageUptodate(page)) {
					/*
					 * Page got invalidated, repeat.
					 */
					if (!page->mapping) {
						unlock_page(page);
						page_cache_release(page);
						goto find_page;
					}
					ret = -EIO;
					goto out;
				}
			} else
				SetPageUptodate(page);
		}
	}

	ret = mapping->a_ops->prepare_write(file, page, offset, offset + this_len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
		char *dst = kmap_atomic(page, KM_USER0);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER0);
	}

	ret = mapping->a_ops->commit_write(file, page, offset, offset + this_len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	/*
	 * Return the number of bytes written.
	 */
	ret = this_len;
	mark_page_accessed(page);
	balance_dirty_pages_ratelimited(mapping);
out:
	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN))
		page_cache_release(page);

	unlock_page(page);
out_nomem:
	buf->ops->unmap(info, buf);
	return ret;
}

/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
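/*
 * The actor contract, as used below (descriptive only): the actor is called
 * once per pipe_buffer with an up-to-date splice_desc and returns how many
 * bytes it consumed from that buffer, 0 if it could make no progress, or a
 * negative errno. splice_from_pipe() then advances buf->offset/len and the
 * splice_desc by that amount, and releases the buffer once it is drained.
 */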
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = *ppos;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err <= 0) {
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += err;
			buf->offset += err;
			buf->len -= err;

			sd.len -= err;
			sd.pos += err;
			sd.total_len -= err;
			if (sd.len)
				continue;

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			if (!sd.total_len)
				break;
		}

		if (pipe->nrbufs)
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	return ret;
}

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe info
 * @out:	file to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 *
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	ssize_t ret;

	ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
	if (ret > 0) {
		struct inode *inode = mapping->host;

		*ppos += ret;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			int err;

			mutex_lock(&inode->i_mutex);
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA | OSYNC_DATA);
			mutex_unlock(&inode->i_mutex);

			if (err)
				ret = err;
		}
	}

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_write);

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}
EXPORT_SYMBOL(generic_splice_sendpage);

/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	int ret;

	if (unlikely(!out->f_op || !out->f_op->splice_write))
		return -EINVAL;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	ret = rw_verify_area(WRITE, out, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	return out->f_op->splice_write(pipe, out, ppos, len, flags);
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	loff_t isize, left;
	int ret;

	if (unlikely(!in->f_op || !in->f_op->splice_read))
		return -EINVAL;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	return in->f_op->splice_read(in, ppos, pipe, len, flags);
}

long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	loff_t out_off;
	umode_t i_mode;
	int i;

	/*
	 * We require the input to be a regular file, as we don't want to
	 * randomly drop data for e.g. socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info(NULL);
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * Do the splice.
	 */
	ret = 0;
	bytes = 0;
	out_off = 0;

	while (len) {
		size_t read_len, max_read_len;

		/*
		 * Do at most PIPE_BUFFERS pages worth of transfer:
		 */
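		/*
		 * With 4K pages and the usual PIPE_BUFFERS of 16 this is a
		 * 64KB chunk per iteration (illustrative figures, both are
		 * configuration dependent).
		 */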
		max_read_len = min(len, (size_t)(PIPE_BUFFERS * PAGE_SIZE));

		ret = do_splice_to(in, ppos, pipe, max_read_len, flags);
		if (unlikely(ret < 0))
			goto out_release;

		read_len = ret;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = do_splice_from(pipe, out, &out_off, read_len,
				     flags & ~SPLICE_F_NONBLOCK);
		if (unlikely(ret < 0))
			goto out_release;

		bytes += ret;
		len -= ret;

		/*
		 * In nonblocking mode, if we got back a short read then
		 * that was due to either an IO error or due to the
		 * pagecache entry not being there. In the IO error case
		 * the _next_ splice attempt will produce a clean IO error
		 * return value (not a short read), so in both cases it's
		 * correct to break out of the loop here:
		 */
		if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
			break;
	}

	pipe->nrbufs = pipe->curbuf = 0;

	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}
	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */
	if (bytes > 0)
		return bytes;

	return ret;
}
EXPORT_SYMBOL(do_splice_direct);

/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	loff_t offset, *off;
	long ret;

	pipe = in->f_dentry->d_inode->i_pipe;
	if (pipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (out->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &out->f_pos;

		ret = do_splice_from(pipe, out, off, len, flags);

		if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	pipe = out->f_dentry->d_inode->i_pipe;
	if (pipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (in->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &in->f_pos;

		ret = do_splice_to(in, off, pipe, len, flags);

		if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	return -EINVAL;
}

/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
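/*
 * A worked example of the mapping below (illustrative numbers, assuming a
 * 4K page size): an iovec whose base sits 4000 bytes into a page with a
 * length of 8192 bytes spans three pages and produces three entries,
 * { .offset = 4000, .len = 96 }, { .offset = 0, .len = 4096 } and
 * { .offset = 0, .len = 4000 }.
 */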
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial)
{
	int buffers = 0, error = 0;

	/*
	 * It's ok to take the mmap_sem for reading, even
	 * across a "get_user()".
	 */
	down_read(&current->mm->mmap_sem);

	while (nr_vecs) {
		unsigned long off, npages;
		void __user *base;
		size_t len;
		int i;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!len))
			break;
		error = -EFAULT;
		if (unlikely(!base))
			break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;
		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > PIPE_BUFFERS - buffers)
			npages = PIPE_BUFFERS - buffers;

		error = get_user_pages(current, current->mm,
				       (unsigned long) base, npages, 0, 0,
				       &pages[buffers], NULL);
		if (unlikely(error <= 0))
			break;

		/*
		 * Fill this contiguous range into the partial page map.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE) - off;

			partial[buffers].offset = off;
			partial[buffers].len = plen;

			off = 0;
			len -= plen;
			buffers++;
		}

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */
		if (len)
			break;

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
		if (error < npages || buffers == PIPE_BUFFERS)
			break;

		nr_vecs--;
		iov++;
	}

	up_read(&current->mm->mmap_sem);

	if (buffers)
		return buffers;

	return error;
}

/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 *
 * Note that vmsplice only supports splicing _from_ user memory to a pipe,
 * not the other way around. Splicing from user memory is a simple operation
 * that can be supported without any funky alignment restrictions or nasty
 * vm tricks. We simply map in the user memory and fill it into a pipe.
 * The reverse isn't quite as easy, though. There are two possible solutions
 * for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (it
 *	  has restrictions on both ends of the pipe).
 *
 * Alas, it isn't here.
 *
 */
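/*
 * Sketch of the userspace side (illustrative, descriptor and buffer names
 * are made up): hand a user buffer to a pipe with vmsplice(2), then splice
 * the pipe into a file or socket.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = buflen };
 *	ssize_t n = vmsplice(pfd[1], &iov, 1, 0);
 *	splice(pfd[0], NULL, out_fd, NULL, n, 0);
 */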
static long do_vmsplice(struct file *file, const struct iovec __user *iov,
			unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe = file->f_dentry->d_inode->i_pipe;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &user_page_pipe_buf_ops,
	};

	if (unlikely(!pipe))
		return -EBADF;
	if (unlikely(nr_segs > UIO_MAXIOV))
		return -EINVAL;
	else if (unlikely(!nr_segs))
		return 0;

	spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial);
	if (spd.nr_pages <= 0)
		return spd.nr_pages;

	return splice_to_pipe(pipe, &spd);
}

asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct file *file;
	long error;
	int fput;

	error = -EBADF;
	file = fget_light(fd, &fput);
	if (file) {
		if (file->f_mode & FMODE_WRITE)
			error = do_vmsplice(file, iov, nr_segs, flags);

		fput_light(file, fput);
	}

	return error;
}

asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
			   int fd_out, loff_t __user *off_out,
			   size_t len, unsigned int flags)
{
	long error;
	struct file *in, *out;
	int fput_in, fput_out;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fd_in, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			out = fget_light(fd_out, &fput_out);
			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, off_in,
							  out, off_out,
							  len, flags);
				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}

/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret, do_wakeup, i, ipipe_first;

	ret = do_wakeup = ipipe_first = 0;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by inode address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	if (ipipe->inode < opipe->inode) {
		ipipe_first = 1;
		mutex_lock(&ipipe->inode->i_mutex);
		mutex_lock(&opipe->inode->i_mutex);
	} else {
		mutex_lock(&opipe->inode->i_mutex);
		mutex_lock(&ipipe->inode->i_mutex);
	}

	for (i = 0;; i++) {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		if (ipipe->nrbufs - i) {
			ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));

			/*
			 * If we have room, fill this buffer
			 */
			if (opipe->nrbufs < PIPE_BUFFERS) {
				int nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

				/*
				 * Get a reference to this pipe buffer,
				 * so we can copy the contents over.
				 */
				ibuf->ops->get(ipipe, ibuf);

				obuf = opipe->bufs + nbuf;
				*obuf = *ibuf;

				if (obuf->len > len)
					obuf->len = len;

				opipe->nrbufs++;
				do_wakeup = 1;
				ret += obuf->len;
				len -= obuf->len;

				if (!len)
					break;
				if (opipe->nrbufs < PIPE_BUFFERS)
					continue;
			}

			/*
			 * We have input available, but no output room.
			 * If we already copied data, return that. If we
			 * need to drop the opipe lock, it must be ordered
			 * last to avoid deadlocks.
			 */
			if ((flags & SPLICE_F_NONBLOCK) || !ipipe_first) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}
			if (do_wakeup) {
				smp_mb();
				if (waitqueue_active(&opipe->wait))
					wake_up_interruptible(&opipe->wait);
				kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
				do_wakeup = 0;
			}

			opipe->waiting_writers++;
			pipe_wait(opipe);
			opipe->waiting_writers--;
			continue;
		}

		/*
		 * No input buffers, do the usual checks for available
		 * writers and blocking and wait if necessary
		 */
		if (!ipipe->writers)
			break;
		if (!ipipe->waiting_writers) {
			if (ret)
				break;
		}
		/*
		 * pipe_wait() drops the ipipe mutex. To avoid deadlocks
		 * with another process, we can only safely do that if
		 * the ipipe lock is ordered last.
		 */
		if ((flags & SPLICE_F_NONBLOCK) || ipipe_first) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (waitqueue_active(&ipipe->wait))
			wake_up_interruptible_sync(&ipipe->wait);
		kill_fasync(&ipipe->fasync_writers, SIGIO, POLL_OUT);

		pipe_wait(ipipe);
	}

	mutex_unlock(&ipipe->inode->i_mutex);
	mutex_unlock(&opipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&opipe->wait))
			wake_up_interruptible(&opipe->wait);
		kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
	}

	return ret;
}

/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
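/*
 * A userspace sketch (illustrative, descriptor names are made up): duplicate
 * whatever is sitting in one pipe into another without consuming it, then
 * drain the original separately.
 *
 *	ssize_t n = tee(pipe_in_fd, pipe_out_fd, SIZE_MAX, SPLICE_F_NONBLOCK);
 *	if (n > 0)
 *		splice(pipe_in_fd, NULL, log_fd, NULL, n, 0);
 */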
static long do_tee(struct file *in, struct file *out, size_t len,
		   unsigned int flags)
{
	struct pipe_inode_info *ipipe = in->f_dentry->d_inode->i_pipe;
	struct pipe_inode_info *opipe = out->f_dentry->d_inode->i_pipe;

	/*
	 * Link ipipe to the output pipe, consuming the length as we go along.
	 */
	if (ipipe && opipe)
		return link_pipe(ipipe, opipe, len, flags);

	return -EINVAL;
}

asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
{
	struct file *in;
	int error, fput_in;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fdin, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			int fput_out;
			struct file *out = fget_light(fdout, &fput_out);

			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_tee(in, out, len, flags);
				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}