/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

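/* Reset a request to its freshly-allocated state, holding one reference */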
static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

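/* Block all signals except SIGKILL, saving the old mask in *oldset */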
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

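/* Record the sending task's credentials in the request header */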
static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
}

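/*
 * Allocate a request and fill in the caller's context.  Sleeps until
 * the connection is unblocked; only SIGKILL can interrupt the wait.
 * Returns an ERR_PTR on interruption, on a dead connection, or on
 * allocation failure.
 */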
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->blocked_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up(&fc->blocked_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Get a request for a file operation.  Always succeeds.
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released.
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
	__releases(fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == FUSE_MAX_BACKGROUND) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		fc->num_background--;
	}
	spin_unlock(&fc->lock);
	dput(req->dentry);
	mntput(req->vfsmount);
	if (req->file)
		fput(req->file);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	else
		fuse_put_request(fc, req);
}

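/*
 * Wait for the request to be answered, unless a signal is already
 * pending.  Drops fc->lock for the duration of the sleep.
 */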
static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

/* Called with fc->lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (req->force) {
		spin_unlock(&fc->lock);
		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
		spin_lock(&fc->lock);
	} else {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);
	}

	if (req->aborted)
		goto aborted;
	if (req->state == FUSE_REQ_FINISHED)
		return;

	req->out.h.error = -EINTR;
	req->aborted = 1;

 aborted:
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
	if (req->state == FUSE_REQ_PENDING) {
		list_del(&req->list);
		__fuse_put_request(req);
	} else if (req->state == FUSE_REQ_SENT) {
		spin_unlock(&fc->lock);
		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
		spin_lock(&fc->lock);
	}
}

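/* Total number of bytes taken up by an argument array */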
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

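/* Allocate the next unique request ID; zero is reserved, so skip it */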
static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}

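/*
 * Add a request to the pending list and wake up the reader.  Must be
 * called with fc->lock held.
 */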
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

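/*
 * Send a request and wait for the answer.  An extra reference is
 * taken while the request is queued, so that request_end() cannot
 * free it before the caller has read the reply from req->out.
 */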
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}

static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		req->background = 1;
		fc->num_background++;
		if (fc->num_background == FUSE_MAX_BACKGROUND)
			fc->blocked = 1;

		queue_request(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait(fc, req);
}

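/*
 * A typical synchronous caller elsewhere in fuse uses this API
 * roughly as follows (an illustrative sketch only, with opcode and
 * argument setup elided):
 *
 *	req = fuse_get_req(fc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->in.h.opcode = FUSE_STATFS;
 *	...
 *	request_send(fc, req);
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);
 */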
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted, bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, the requester
 * thread is currently waiting for it to be unlocked, so wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

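/*
 * Per-call state for copying a request to/from a vector of userspace
 * buffers, one kmapped page at a time.
 */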
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

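/* A request is available if either the pending or the interrupts list is non-empty */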
static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
			       const struct iovec *iov, unsigned long nr_segs)
	__releases(fc->lock)
{
	struct fuse_copy_state cs;
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (iov_length(iov, nr_segs) < reqsize)
		return -EINVAL;

	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
	err = fuse_copy_one(&cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(&cs, &arg, sizeof(arg));
	fuse_copy_finish(&cs);

	return err ? err : reqsize;
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to the userspace buffer.
 * If no reply is needed (FORGET) or the request has been aborted or
 * there was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, req, iov, nr_segs);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err && req->aborted)
		err = -ENOENT;
	if (err) {
		if (!req->aborted)
			req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

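/*
 * Copy the reply arguments from the userspace buffer into the
 * request.  If the last argument is variable-sized ('argvar'), a
 * short reply just shrinks it; any other size mismatch is an error.
 */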
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}

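/*
 * The device is always writable; it becomes readable when a request
 * is pending on the connection.
 */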
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			/* The end function will consume this reference */
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}

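/*
 * Final close of the device file: mark the connection dead and finish
 * off every request still on the pending and processing lists.
 */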
static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fasync_helper(-1, file, 0, &fc->fasync);
		fuse_conn_put(fc);
	}

	return 0;
}

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_dev_read,
	.write		= do_sync_write,
	.aio_write	= fuse_dev_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}