2007-04-27 02:49:28 +04:00
/* internal AFS stuff
2005-04-17 02:20:36 +04:00
*
2007-04-27 02:55:03 +04:00
* Copyright ( C ) 2002 , 2007 Red Hat , Inc . All Rights Reserved .
2005-04-17 02:20:36 +04:00
* Written by David Howells ( dhowells @ redhat . com )
*
* This program is free software ; you can redistribute it and / or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation ; either version
* 2 of the License , or ( at your option ) any later version .
*/
# include <linux/compiler.h>
# include <linux/kernel.h>
# include <linux/fs.h>
# include <linux/pagemap.h>
2007-04-27 02:55:03 +04:00
# include <linux/rxrpc.h>
2007-04-27 02:57:07 +04:00
# include <linux/key.h>
Detach sched.h from mm.h
First thing mm.h does is including sched.h solely for can_do_mlock() inline
function which has "current" dereference inside. By dealing with can_do_mlock()
mm.h can be detached from sched.h which is good. See below, why.
This patch
a) removes unconditional inclusion of sched.h from mm.h
b) makes can_do_mlock() normal function in mm/mlock.c
c) exports can_do_mlock() to not break compilation
d) adds sched.h inclusions back to files that were getting it indirectly.
e) adds less bloated headers to some files (asm/signal.h, jiffies.h) that were
getting them indirectly
Net result is:
a) mm.h users would get less code to open, read, preprocess, parse, ... if
they don't need sched.h
b) sched.h stops being dependency for significant number of files:
on x86_64 allmodconfig touching sched.h results in recompile of 4083 files,
after patch it's only 3744 (-8.3%).
Cross-compile tested on
all arm defconfigs, all mips defconfigs, all powerpc defconfigs,
alpha alpha-up
arm
i386 i386-up i386-defconfig i386-allnoconfig
ia64 ia64-up
m68k
mips
parisc parisc-up
powerpc powerpc-up
s390 s390-up
sparc sparc-up
sparc64 sparc64-up
um-x86_64
x86_64 x86_64-up x86_64-defconfig x86_64-allnoconfig
as well as my two usual configs.
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-21 01:22:52 +04:00
# include <linux/workqueue.h>
2007-05-31 11:40:52 +04:00
# include <linux/sched.h>
2009-10-02 02:44:27 +04:00
# include <linux/fscache.h>
2010-04-22 13:58:18 +04:00
# include <linux/backing-dev.h>
2017-02-10 19:34:07 +03:00
# include <linux/uuid.h>
2016-08-30 11:49:29 +03:00
# include <net/af_rxrpc.h>
2007-05-31 11:40:52 +04:00
2007-04-27 02:55:03 +04:00
# include "afs.h"
# include "afs_vl.h"
# define AFS_CELL_MAX_ADDRS 15
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
struct pagevec ;
2007-04-27 02:55:03 +04:00
struct afs_call ;
/*
 * State of a cached volume location record.  Packed because it is embedded
 * in struct afs_vlocation.
 */
typedef enum {
	AFS_VL_NEW,			/* new, uninitialised record */
	AFS_VL_CREATING,		/* creating record */
	AFS_VL_VALID,			/* record is pending */
	AFS_VL_NO_VOLUME,		/* no such volume available */
	AFS_VL_UPDATING,		/* update in progress */
	AFS_VL_VOLUME_DELETED,		/* volume was deleted */
	AFS_VL_UNCERTAIN,		/* uncertain state (update failed) */
} __attribute__((packed)) afs_vlocation_state_t;
2005-04-17 02:20:36 +04:00
2007-04-27 02:57:07 +04:00
struct afs_mount_params {
bool rwpath ; /* T if the parent should be considered R/W */
bool force ; /* T to force cell type */
2010-08-11 12:38:04 +04:00
bool autocell ; /* T if set auto mount operation */
2007-04-27 02:57:07 +04:00
afs_voltype_t type ; /* type of volume requested */
int volnamesz ; /* size of volume name */
const char * volname ; /* name of volume to mount */
struct afs_cell * cell ; /* cell in which to find volume */
struct afs_volume * volume ; /* volume record */
struct key * key ; /* key to use for secure mounting */
} ;
2017-01-05 13:38:34 +03:00
/*
 * Progress of an RxRPC call, from the AFS side.  The first two states apply
 * to outgoing calls, the next three to incoming ones.
 */
enum afs_call_state {
	AFS_CALL_REQUESTING,	/* request is being sent for outgoing call */
	AFS_CALL_AWAIT_REPLY,	/* awaiting reply to outgoing call */
	AFS_CALL_AWAIT_OP_ID,	/* awaiting op ID on incoming call */
	AFS_CALL_AWAIT_REQUEST,	/* awaiting request data on incoming call */
	AFS_CALL_REPLYING,	/* replying to incoming call */
	AFS_CALL_AWAIT_ACK,	/* awaiting final ACK of incoming call */
	AFS_CALL_COMPLETE,	/* Completed or failed */
};
2007-04-27 02:55:03 +04:00
/*
* a record of an in - progress RxRPC call
*/
struct afs_call {
const struct afs_call_type * type ; /* type of call */
wait_queue_head_t waitq ; /* processes awaiting completion */
2017-01-05 13:38:36 +03:00
struct work_struct async_work ; /* async I/O processor */
2007-04-27 02:55:03 +04:00
struct work_struct work ; /* actual work processor */
struct rxrpc_call * rxcall ; /* RxRPC call handle */
struct key * key ; /* security for this call */
struct afs_server * server ; /* server affected by incoming CM call */
void * request ; /* request data (first part) */
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into as chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
struct address_space * mapping ; /* page set */
struct afs_writeback * wb ; /* writeback being performed */
2007-04-27 02:55:03 +04:00
void * buffer ; /* reply receive buffer */
void * reply ; /* reply buffer (first part) */
void * reply2 ; /* reply buffer (second part) */
void * reply3 ; /* reply buffer (third part) */
2007-04-27 02:59:35 +04:00
void * reply4 ; /* reply buffer (fourth part) */
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into as chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
pgoff_t first ; /* first page in mapping to deal with */
pgoff_t last ; /* last page in mapping to deal with */
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook that indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-30 22:42:14 +03:00
size_t offset ; /* offset into received data store */
2017-01-05 13:38:36 +03:00
atomic_t usage ;
2017-01-05 13:38:34 +03:00
enum afs_call_state state ;
2007-04-27 02:55:03 +04:00
int error ; /* error code */
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-30 22:42:14 +03:00
u32 abort_code ; /* Remote abort ID or 0 */
2007-04-27 02:55:03 +04:00
unsigned request_size ; /* size of request data */
unsigned reply_max ; /* maximum size of reply */
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into as chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
unsigned first_offset ; /* offset into mapping[first] */
unsigned last_to ; /* amount of mapping[last] */
2007-04-27 02:55:03 +04:00
unsigned char unmarshall ; /* unmarshalling phase */
bool incoming ; /* T if incoming call */
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into as chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
bool send_pages ; /* T if data from mapping should be sent */
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-30 22:42:14 +03:00
bool need_attention ; /* T if RxRPC poked us */
2017-01-05 13:38:36 +03:00
bool async ; /* T if asynchronous */
2007-04-27 02:55:03 +04:00
u16 service_id ; /* RxRPC service ID to call */
__be16 port ; /* target UDP port */
2016-10-13 10:27:10 +03:00
u32 operation_ID ; /* operation ID for an incoming call */
2007-04-27 02:55:03 +04:00
u32 count ; /* count for use in unmarshalling */
__be32 tmp ; /* place to extract temporary data */
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into as chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
afs_dataversion_t store_version ; /* updated version expected from store */
2007-04-27 02:55:03 +04:00
} ;
struct afs_call_type {
2007-04-27 02:57:07 +04:00
const char * name ;
2007-04-27 02:55:03 +04:00
/* deliver request or reply data to an call
* - returning an error will cause the call to be aborted
*/
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-30 22:42:14 +03:00
int ( * deliver ) ( struct afs_call * call ) ;
2007-04-27 02:55:03 +04:00
/* map an abort code to an error number */
int ( * abort_to_error ) ( u32 abort_code ) ;
/* clean up a call */
void ( * destructor ) ( struct afs_call * call ) ;
2017-01-05 13:38:36 +03:00
/* Work function */
void ( * work ) ( struct work_struct * work ) ;
2007-04-27 02:55:03 +04:00
} ;
2017-01-05 13:38:34 +03:00
/*
 * Record of an outstanding read operation on a vnode.
 */
struct afs_read {
	loff_t			pos;		/* Where to start reading */
	loff_t			len;		/* How much to read */
	loff_t			actual_len;	/* How much we're actually getting */
	atomic_t		usage;
	unsigned int		remain;		/* Amount remaining */
	unsigned int		index;		/* Which page we're reading into */
	unsigned int		pg_offset;	/* Offset in page we're at */
	unsigned int		nr_pages;
	void (*page_done)(struct afs_call *, struct afs_read *);
	struct page		*pages[];	/* flexible array - must go last */
};
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into as chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
/*
 * record of an outstanding writeback on a vnode
 */
struct afs_writeback {
	struct list_head	link;		/* link in vnode->writebacks */
	struct work_struct	writer;		/* work item to perform the writeback */
	struct afs_vnode	*vnode;		/* vnode to which this write applies */
	struct key		*key;		/* owner of this write */
	wait_queue_head_t	waitq;		/* completion and ready wait queue */
	pgoff_t			first;		/* first page in batch */
	pgoff_t			point;		/* last page in current store op */
	pgoff_t			last;		/* last page in batch (inclusive) */
	unsigned		offset_first;	/* offset into first page of start of write */
	unsigned		to_last;	/* offset into last page of end of write */
	int			num_conflicts;	/* count of conflicting writes in list */
	int			usage;
	bool			conflicts;	/* T if has dependent conflicts */
	enum {
		AFS_WBACK_SYNCING,		/* synchronisation being performed */
		AFS_WBACK_PENDING,		/* write pending */
		AFS_WBACK_CONFLICTING,		/* conflicting writes posted */
		AFS_WBACK_WRITING,		/* writing back */
		AFS_WBACK_COMPLETE		/* the writeback record has been unlinked */
	} state __attribute__((packed));
};
2007-04-27 02:55:03 +04:00
/*
 * AFS superblock private data
 * - there's one superblock per volume
 */
struct afs_super_info {
	struct afs_volume	*volume;	/* volume record */
	char			rwparent;	/* T if parent is R/W AFS volume */
};
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
static inline struct afs_super_info * AFS_FS_S ( struct super_block * sb )
{
return sb - > s_fs_info ;
2005-04-17 02:20:36 +04:00
}
2007-04-27 02:55:03 +04:00
extern struct file_system_type afs_fs_type ;
/*
* entry in the cached cell catalogue
*/
struct afs_cache_cell {
2007-04-27 02:57:07 +04:00
char name [ AFS_MAXCELLNAME ] ; /* cell name (padded with NULs) */
struct in_addr vl_servers [ 15 ] ; /* cached cell VL servers */
2007-04-27 02:55:03 +04:00
} ;
/*
* AFS cell record
*/
struct afs_cell {
atomic_t usage ;
struct list_head link ; /* main cell list link */
2007-04-27 02:57:07 +04:00
struct key * anonymous_key ; /* anonymous user key for this cell */
2007-04-27 02:55:03 +04:00
struct list_head proc_link ; /* /proc cell list link */
2009-04-03 19:42:41 +04:00
# ifdef CONFIG_AFS_FSCACHE
struct fscache_cookie * cache ; /* caching cookie */
2007-04-27 02:55:03 +04:00
# endif
/* server record management */
rwlock_t servers_lock ; /* active server list lock */
struct list_head servers ; /* active server list */
/* volume location record management */
struct rw_semaphore vl_sem ; /* volume management serialisation semaphore */
struct list_head vl_list ; /* cell's active VL record list */
spinlock_t vl_lock ; /* vl_list lock */
unsigned short vl_naddrs ; /* number of VL servers in addr list */
unsigned short vl_curr_svix ; /* current server index */
struct in_addr vl_addrs [ AFS_CELL_MAX_ADDRS ] ; /* cell VL server addresses */
char name [ 0 ] ; /* cell name - must go last */
} ;
/*
* entry in the cached volume location catalogue
*/
struct afs_cache_vlocation {
2007-04-27 02:57:07 +04:00
/* volume name (lowercase, padded with NULs) */
uint8_t name [ AFS_MAXVOLNAME + 1 ] ;
2007-04-27 02:55:03 +04:00
uint8_t nservers ; /* number of entries used in servers[] */
uint8_t vidmask ; /* voltype mask for vid[] */
uint8_t srvtmask [ 8 ] ; /* voltype masks for servers[] */
# define AFS_VOL_VTM_RW 0x01 /* R/W version of the volume is available (on this server) */
# define AFS_VOL_VTM_RO 0x02 /* R/O version of the volume is available (on this server) */
# define AFS_VOL_VTM_BAK 0x04 /* backup version of the volume is available (on this server) */
afs_volid_t vid [ 3 ] ; /* volume IDs for R/W, R/O and Bak volumes */
struct in_addr servers [ 8 ] ; /* fileserver addresses */
time_t rtime ; /* last retrieval time */
} ;
/*
 * volume -> vnode hash table entry
 */
struct afs_cache_vhash {
	afs_voltype_t		vtype;		/* which volume variation */
	uint8_t			hash_bucket;	/* which hash bucket this represents */
} __attribute__((packed));
/*
* AFS volume location record
*/
struct afs_vlocation {
atomic_t usage ;
time_t time_of_death ; /* time at which put reduced usage to 0 */
struct list_head link ; /* link in cell volume location list */
struct list_head grave ; /* link in master graveyard list */
struct list_head update ; /* link in master update list */
struct afs_cell * cell ; /* cell to which volume belongs */
2009-04-03 19:42:41 +04:00
# ifdef CONFIG_AFS_FSCACHE
struct fscache_cookie * cache ; /* caching cookie */
2007-04-27 02:55:03 +04:00
# endif
struct afs_cache_vlocation vldb ; /* volume information DB record */
struct afs_volume * vols [ 3 ] ; /* volume access record pointer (index by type) */
wait_queue_head_t waitq ; /* status change waitqueue */
time_t update_at ; /* time at which record should be updated */
2007-04-27 07:39:14 +04:00
spinlock_t lock ; /* access lock */
2007-04-27 02:55:03 +04:00
afs_vlocation_state_t state ; /* volume location state */
unsigned short upd_rej_cnt ; /* ENOMEDIUM count during update */
unsigned short upd_busy_cnt ; /* EBUSY count during update */
bool valid ; /* T if valid */
} ;
/*
 * AFS fileserver record
 */
struct afs_server {
	atomic_t		usage;
	time_t			time_of_death;	/* time at which put reduced usage to 0 */
	struct in_addr		addr;		/* server address */
	struct afs_cell		*cell;		/* cell in which server resides */
	struct list_head	link;		/* link in cell's server list */
	struct list_head	grave;		/* link in master graveyard list */
	struct rb_node		master_rb;	/* link in master by-addr tree */
	struct rw_semaphore	sem;		/* access lock */

	/* file service access */
	struct rb_root		fs_vnodes;	/* vnodes backed by this server (ordered by FID) */
	unsigned long		fs_act_jif;	/* time at which last activity occurred */
	unsigned long		fs_dead_jif;	/* time at which no longer to be considered dead */
	spinlock_t		fs_lock;	/* access lock */
	int			fs_state;	/* 0 or reason FS currently marked dead (-errno) */

	/* callback promise management */
	struct rb_root		cb_promises;	/* vnode expiration list (ordered earliest first) */
	struct delayed_work	cb_updater;	/* callback updater */
	struct delayed_work	cb_break_work;	/* collected break dispatcher */
	wait_queue_head_t	cb_break_waitq;	/* space available in cb_break waitqueue */
	spinlock_t		cb_lock;	/* access lock */
	struct afs_callback	cb_break[64];	/* ring of callbacks awaiting breaking */
	atomic_t		cb_break_n;	/* number of pending breaks */
	u8			cb_break_head;	/* head of callback breaking ring */
	u8			cb_break_tail;	/* tail of callback breaking ring */
};
/*
* AFS volume access record
*/
struct afs_volume {
atomic_t usage ;
struct afs_cell * cell ; /* cell to which belongs (unrefd ptr) */
struct afs_vlocation * vlocation ; /* volume location */
2009-04-03 19:42:41 +04:00
# ifdef CONFIG_AFS_FSCACHE
struct fscache_cookie * cache ; /* caching cookie */
2007-04-27 02:55:03 +04:00
# endif
afs_volid_t vid ; /* volume ID */
afs_voltype_t type ; /* type of volume */
char type_force ; /* force volume type (suppress R/O -> R/W) */
unsigned short nservers ; /* number of server slots filled */
unsigned short rjservers ; /* number of servers discarded due to -ENOMEDIUM */
struct afs_server * servers [ 8 ] ; /* servers on which volume resides (ordered) */
struct rw_semaphore server_sem ; /* lock for accessing current server */
2010-04-22 13:58:18 +04:00
struct backing_dev_info bdi ;
2007-04-27 02:55:03 +04:00
} ;
/*
 * vnode catalogue entry
 */
struct afs_cache_vnode {
	afs_vnodeid_t		vnode_id;	/* vnode ID */
	unsigned		vnode_unique;	/* vnode ID uniquifier */
	afs_dataversion_t	data_version;	/* data version */
};
/*
* AFS inode private data
*/
struct afs_vnode {
struct inode vfs_inode ; /* the VFS's inode record */
struct afs_volume * volume ; /* volume on which vnode resides */
struct afs_server * server ; /* server currently supplying this file */
struct afs_fid fid ; /* the file identifier for this inode */
struct afs_file_status status ; /* AFS status info for this file */
2009-04-03 19:42:41 +04:00
# ifdef CONFIG_AFS_FSCACHE
struct fscache_cookie * cache ; /* caching cookie */
2007-04-27 02:55:03 +04:00
# endif
2007-04-27 02:57:07 +04:00
struct afs_permits * permits ; /* cache of permits so far obtained */
struct mutex permits_lock ; /* lock for altering permits list */
2007-04-27 02:59:35 +04:00
struct mutex validate_lock ; /* lock for validating this vnode */
2007-04-27 02:55:03 +04:00
wait_queue_head_t update_waitq ; /* status fetch waitqueue */
2007-04-27 02:59:35 +04:00
int update_cnt ; /* number of outstanding ops that will update the
2007-04-27 02:55:03 +04:00
* status */
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into as chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
spinlock_t writeback_lock ; /* lock for writebacks */
2007-04-27 02:55:03 +04:00
spinlock_t lock ; /* waitqueue/flags lock */
unsigned long flags ;
# define AFS_VNODE_CB_BROKEN 0 /* set if vnode's callback was broken */
2007-04-27 02:59:35 +04:00
# define AFS_VNODE_UNSET 1 /* set if vnode attributes not yet set */
2007-04-27 02:55:03 +04:00
# define AFS_VNODE_MODIFIED 2 /* set if vnode's data modified */
# define AFS_VNODE_ZAP_DATA 3 /* set if vnode's data should be invalidated */
# define AFS_VNODE_DELETED 4 /* set if vnode deleted on server */
# define AFS_VNODE_MOUNTPOINT 5 /* set if vnode is a mountpoint symlink */
2007-07-16 10:40:12 +04:00
# define AFS_VNODE_LOCKING 6 /* set if waiting for lock on vnode */
# define AFS_VNODE_READLOCKED 7 /* set if vnode is read-locked on the server */
# define AFS_VNODE_WRITELOCKED 8 /* set if vnode is write-locked on the server */
# define AFS_VNODE_UNLOCKING 9 /* set if vnode is being unlocked on the server */
2010-08-11 12:38:04 +04:00
# define AFS_VNODE_AUTOCELL 10 /* set if Vnode is an auto mount point */
# define AFS_VNODE_PSEUDODIR 11 /* set if Vnode is a pseudo directory */
2007-04-27 02:55:03 +04:00
2007-04-27 02:57:07 +04:00
long acl_order ; /* ACL check count (callback break count) */
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into as chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
struct list_head writebacks ; /* alterations in pagecache that need writing */
2007-07-16 10:40:12 +04:00
struct list_head pending_locks ; /* locks waiting to be granted */
struct list_head granted_locks ; /* locks granted on this file */
struct delayed_work lock_work ; /* work to be done in locking */
struct key * unlock_key ; /* key to be used in unlocking */
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into as chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
2007-04-27 02:55:03 +04:00
/* outstanding callback notification on this file */
struct rb_node server_rb ; /* link in server->fs_vnodes */
struct rb_node cb_promise ; /* link in server->cb_promises */
struct work_struct cb_broken_work ; /* work to be done on callback break */
time_t cb_expires ; /* time at which callback expires */
time_t cb_expires_at ; /* time used to order cb_promise */
unsigned cb_version ; /* callback version */
unsigned cb_expiry ; /* callback expiry time */
afs_callback_type_t cb_type ; /* type of callback */
bool cb_promised ; /* true if promise still holds */
} ;
2007-04-27 02:57:07 +04:00
/*
* cached security record for one user ' s attempt to access a vnode
*/
struct afs_permit {
struct key * key ; /* RxRPC ticket holding a security context */
afs_access_t access_mask ; /* access mask for this key */
} ;
/*
 * cache of security records from attempts to access a vnode
 */
struct afs_permits {
	struct rcu_head		rcu;		/* disposal procedure */
	int			count;		/* number of records */
	struct afs_permit	permits[];	/* the permits so far examined
						 * (C99 flexible array member;
						 * allocated with the struct) */
};
2007-04-27 02:58:17 +04:00
/*
* record of one of a system ' s set of network interfaces
*/
struct afs_interface {
struct in_addr address ; /* IPv4 address bound to interface */
struct in_addr netmask ; /* netmask applied to address */
unsigned mtu ; /* MTU of interface */
} ;
2007-04-27 02:55:03 +04:00
/*****************************************************************************/
/*
 * cache.c
 */
#ifdef CONFIG_AFS_FSCACHE
extern struct fscache_netfs afs_cache_netfs;
extern struct fscache_cookie_def afs_cell_cache_index_def;
extern struct fscache_cookie_def afs_vlocation_cache_index_def;
extern struct fscache_cookie_def afs_volume_cache_index_def;
extern struct fscache_cookie_def afs_vnode_cache_index_def;
#else
/* With caching disabled, these must never be dereferenced */
#define afs_cell_cache_index_def	(*(struct fscache_cookie_def *) NULL)
#define afs_vlocation_cache_index_def	(*(struct fscache_cookie_def *) NULL)
#define afs_volume_cache_index_def	(*(struct fscache_cookie_def *) NULL)
#define afs_vnode_cache_index_def	(*(struct fscache_cookie_def *) NULL)
#endif
/*
 * callback.c
 */
extern void afs_init_callback_state(struct afs_server *);
extern void afs_broken_callback_work(struct work_struct *);
extern void afs_break_callbacks(struct afs_server *, size_t,
				struct afs_callback[]);
extern void afs_discard_callback_on_delete(struct afs_vnode *);
extern void afs_give_up_callback(struct afs_vnode *);
extern void afs_dispatch_give_up_callbacks(struct work_struct *);
extern void afs_flush_callback_breaks(struct afs_server *);
extern int __init afs_callback_update_init(void);
extern void afs_callback_update_kill(void);
/*
 * cell.c
 */
extern struct rw_semaphore afs_proc_cells_sem;
extern struct list_head afs_proc_cells;

#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0)
extern int afs_cell_init(char *);
extern struct afs_cell *afs_cell_create(const char *, unsigned, char *, bool);
extern struct afs_cell *afs_cell_lookup(const char *, unsigned, bool);
extern struct afs_cell *afs_grab_cell(struct afs_cell *);
extern void afs_put_cell(struct afs_cell *);
extern void afs_cell_purge(void);

/*
 * cmservice.c
 */
extern bool afs_cm_incoming_call(struct afs_call *);
/*
 * dir.c
 */
extern const struct inode_operations afs_dir_inode_operations;
extern const struct dentry_operations afs_fs_dentry_operations;
extern const struct file_operations afs_dir_file_operations;
/*
 * file.c
 */
extern const struct address_space_operations afs_fs_aops;
extern const struct inode_operations afs_file_inode_operations;
extern const struct file_operations afs_file_operations;

extern int afs_open(struct inode *, struct file *);
extern int afs_release(struct inode *, struct file *);
extern int afs_page_filler(void *, struct page *);
extern void afs_put_read(struct afs_read *);
2007-07-16 10:40:12 +04:00
/*
 * flock.c
 */
extern void __exit afs_kill_lock_manager(void);
extern void afs_lock_work(struct work_struct *);
extern void afs_lock_may_be_available(struct afs_vnode *);
extern int afs_lock(struct file *, int, struct file_lock *);
extern int afs_flock(struct file *, int, struct file_lock *);
/*
* fsclient . c
*/
2007-04-27 02:57:07 +04:00
extern int afs_fs_fetch_file_status ( struct afs_server * , struct key * ,
struct afs_vnode * , struct afs_volsync * ,
2017-01-05 13:38:36 +03:00
bool ) ;
extern int afs_fs_give_up_callbacks ( struct afs_server * , bool ) ;
2007-04-27 02:57:07 +04:00
extern int afs_fs_fetch_data ( struct afs_server * , struct key * ,
2017-01-05 13:38:36 +03:00
struct afs_vnode * , struct afs_read * , bool ) ;
2007-04-27 02:59:35 +04:00
extern int afs_fs_create ( struct afs_server * , struct key * ,
struct afs_vnode * , const char * , umode_t ,
struct afs_fid * , struct afs_file_status * ,
2017-01-05 13:38:36 +03:00
struct afs_callback * , bool ) ;
2007-04-27 02:59:35 +04:00
extern int afs_fs_remove ( struct afs_server * , struct key * ,
2017-01-05 13:38:36 +03:00
struct afs_vnode * , const char * , bool , bool ) ;
2007-04-27 02:59:35 +04:00
extern int afs_fs_link ( struct afs_server * , struct key * , struct afs_vnode * ,
2017-01-05 13:38:36 +03:00
struct afs_vnode * , const char * , bool ) ;
2007-04-27 02:59:35 +04:00
extern int afs_fs_symlink ( struct afs_server * , struct key * ,
struct afs_vnode * , const char * , const char * ,
2017-01-05 13:38:36 +03:00
struct afs_fid * , struct afs_file_status * , bool ) ;
2007-04-27 02:59:35 +04:00
extern int afs_fs_rename ( struct afs_server * , struct key * ,
struct afs_vnode * , const char * ,
2017-01-05 13:38:36 +03:00
struct afs_vnode * , const char * , bool ) ;
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into as chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
extern int afs_fs_store_data ( struct afs_server * , struct afs_writeback * ,
2017-01-05 13:38:36 +03:00
pgoff_t , pgoff_t , unsigned , unsigned , bool ) ;
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into as chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
extern int afs_fs_setattr ( struct afs_server * , struct key * ,
2017-01-05 13:38:36 +03:00
struct afs_vnode * , struct iattr * , bool ) ;
2007-05-11 09:22:20 +04:00
extern int afs_fs_get_volume_status ( struct afs_server * , struct key * ,
struct afs_vnode * ,
2017-01-05 13:38:36 +03:00
struct afs_volume_status * , bool ) ;
2007-07-16 10:40:12 +04:00
extern int afs_fs_set_lock ( struct afs_server * , struct key * ,
2017-01-05 13:38:36 +03:00
struct afs_vnode * , afs_lock_type_t , bool ) ;
2007-07-16 10:40:12 +04:00
extern int afs_fs_extend_lock ( struct afs_server * , struct key * ,
2017-01-05 13:38:36 +03:00
struct afs_vnode * , bool ) ;
2007-07-16 10:40:12 +04:00
extern int afs_fs_release_lock ( struct afs_server * , struct key * ,
2017-01-05 13:38:36 +03:00
struct afs_vnode * , bool ) ;
2007-04-27 02:55:03 +04:00
2005-04-17 02:20:36 +04:00
/*
 * inode.c
 */
extern struct inode *afs_iget_autocell(struct inode *, const char *, int,
				       struct key *);
extern struct inode *afs_iget(struct super_block *, struct key *,
			      struct afs_fid *, struct afs_file_status *,
			      struct afs_callback *);
extern void afs_zap_data(struct afs_vnode *);
extern int afs_validate(struct afs_vnode *, struct key *);
extern int afs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
extern int afs_setattr(struct dentry *, struct iattr *);
extern void afs_evict_inode(struct inode *);
extern int afs_drop_inode(struct inode *);
/*
 * main.c
 */
extern struct workqueue_struct *afs_wq;
extern struct uuid_v1 afs_uuid;

/*
 * misc.c
 */
extern int afs_abort_to_error(u32);
/*
 * mntpt.c
 */
extern const struct inode_operations afs_mntpt_inode_operations;
extern const struct inode_operations afs_autocell_inode_operations;
extern const struct file_operations afs_mntpt_file_operations;

extern struct vfsmount *afs_d_automount(struct path *);
extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *);
extern void afs_mntpt_kill_timer(void);
2017-02-10 19:34:07 +03:00
/*
* netdevices . c
*/
extern int afs_get_ipv4_interfaces ( struct afs_interface * , size_t , bool ) ;
2005-04-17 02:20:36 +04:00
/*
* proc . c
*/
extern int afs_proc_init ( void ) ;
extern void afs_proc_cleanup ( void ) ;
2007-04-27 02:49:28 +04:00
extern int afs_proc_cell_setup ( struct afs_cell * ) ;
extern void afs_proc_cell_remove ( struct afs_cell * ) ;
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
/*
* rxrpc . c
*/
2016-08-30 11:49:29 +03:00
extern struct socket * afs_socket ;
2017-01-05 13:38:36 +03:00
extern atomic_t afs_outstanding_calls ;
2016-08-30 11:49:29 +03:00
2007-04-27 02:55:03 +04:00
extern int afs_open_socket ( void ) ;
extern void afs_close_socket ( void ) ;
2017-01-05 13:38:36 +03:00
extern void afs_put_call ( struct afs_call * ) ;
extern int afs_queue_call_work ( struct afs_call * ) ;
2017-01-05 13:38:36 +03:00
extern int afs_make_call ( struct in_addr * , struct afs_call * , gfp_t , bool ) ;
2007-04-27 02:55:03 +04:00
extern struct afs_call * afs_alloc_flat_call ( const struct afs_call_type * ,
size_t , size_t ) ;
extern void afs_flat_call_destructor ( struct afs_call * ) ;
extern void afs_send_empty_reply ( struct afs_call * ) ;
2007-04-27 02:58:17 +04:00
extern void afs_send_simple_reply ( struct afs_call * , const void * , size_t ) ;
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-30 22:42:14 +03:00
extern int afs_extract_data ( struct afs_call * , void * , size_t , bool ) ;
2007-04-27 02:55:03 +04:00
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-30 22:42:14 +03:00
static inline int afs_transfer_reply ( struct afs_call * call )
rxrpc: Fix races between skb free, ACK generation and replying
Inside the kafs filesystem it is possible to occasionally have a call
processed and terminated before we've had a chance to check whether we need
to clean up the rx queue for that call because afs_send_simple_reply() ends
the call when it is done, but this is done in a workqueue item that might
happen to run to completion before afs_deliver_to_call() completes.
Further, it is possible for rxrpc_kernel_send_data() to be called to send a
reply before the last request-phase data skb is released. The rxrpc skb
destructor is where the ACK processing is done and the call state is
advanced upon release of the last skb. ACK generation is also deferred to
a work item because it's possible that the skb destructor is not called in
a context where kernel_sendmsg() can be invoked.
To this end, the following changes are made:
(1) kernel_rxrpc_data_consumed() is added. This should be called whenever
an skb is emptied so as to crank the ACK and call states. This does
not release the skb, however. kernel_rxrpc_free_skb() must now be
called to achieve that. These together replace
rxrpc_kernel_data_delivered().
(2) kernel_rxrpc_data_consumed() is wrapped by afs_data_consumed().
This makes afs_deliver_to_call() easier to work as the skb can simply
be discarded unconditionally here without trying to work out what the
return value of the ->deliver() function means.
The ->deliver() functions can, via afs_data_complete(),
afs_transfer_reply() and afs_extract_data() mark that an skb has been
consumed (thereby cranking the state) without the need to
conditionally free the skb to make sure the state is correct on an
incoming call for when the call processor tries to send the reply.
(3) rxrpc_recvmsg() now has to call kernel_rxrpc_data_consumed() when it
has finished with a packet and MSG_PEEK isn't set.
(4) rxrpc_packet_destructor() no longer calls rxrpc_hard_ACK_data().
Because of this, we no longer need to clear the destructor and put the
call before we free the skb in cases where we don't want the ACK/call
state to be cranked.
(5) The ->deliver() call-type callbacks are made to return -EAGAIN rather
than 0 if they expect more data (afs_extract_data() returns -EAGAIN to
the delivery function already), and the caller is now responsible for
producing an abort if that was the last packet.
(6) There are many bits of unmarshalling code where:
ret = afs_extract_data(call, skb, last, ...);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
is to be found. As -EAGAIN can now be passed back to the caller, we
now just return if ret < 0:
ret = afs_extract_data(call, skb, last, ...);
if (ret < 0)
return ret;
(7) Checks for trailing data and empty final data packets has been
consolidated as afs_data_complete(). So:
if (skb->len > 0)
return -EBADMSG;
if (!last)
return 0;
becomes:
ret = afs_data_complete(call, skb, last);
if (ret < 0)
return ret;
(8) afs_transfer_reply() now checks the amount of data it has against the
amount of data desired and the amount of data in the skb and returns
an error to induce an abort if we don't get exactly what we want.
Without these changes, the following oops can occasionally be observed,
particularly if some printks are inserted into the delivery path:
general protection fault: 0000 [#1] SMP
Modules linked in: kafs(E) af_rxrpc(E) [last unloaded: af_rxrpc]
CPU: 0 PID: 1305 Comm: kworker/u8:3 Tainted: G E 4.7.0-fsdevel+ #1303
Hardware name: ASUS All Series/H97-PLUS, BIOS 2306 10/09/2014
Workqueue: kafsd afs_async_workfn [kafs]
task: ffff88040be041c0 ti: ffff88040c070000 task.ti: ffff88040c070000
RIP: 0010:[<ffffffff8108fd3c>] [<ffffffff8108fd3c>] __lock_acquire+0xcf/0x15a1
RSP: 0018:ffff88040c073bc0 EFLAGS: 00010002
RAX: 6b6b6b6b6b6b6b6b RBX: 0000000000000000 RCX: ffff88040d29a710
RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff88040d29a710
RBP: ffff88040c073c70 R08: 0000000000000001 R09: 0000000000000001
R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000000
R13: 0000000000000000 R14: ffff88040be041c0 R15: ffffffff814c928f
FS: 0000000000000000(0000) GS:ffff88041fa00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fa4595f4750 CR3: 0000000001c14000 CR4: 00000000001406f0
Stack:
0000000000000006 000000000be04930 0000000000000000 ffff880400000000
ffff880400000000 ffffffff8108f847 ffff88040be041c0 ffffffff81050446
ffff8803fc08a920 ffff8803fc08a958 ffff88040be041c0 ffff88040c073c38
Call Trace:
[<ffffffff8108f847>] ? mark_held_locks+0x5e/0x74
[<ffffffff81050446>] ? __local_bh_enable_ip+0x9b/0xa1
[<ffffffff8108f9ca>] ? trace_hardirqs_on_caller+0x16d/0x189
[<ffffffff810915f4>] lock_acquire+0x122/0x1b6
[<ffffffff810915f4>] ? lock_acquire+0x122/0x1b6
[<ffffffff814c928f>] ? skb_dequeue+0x18/0x61
[<ffffffff81609dbf>] _raw_spin_lock_irqsave+0x35/0x49
[<ffffffff814c928f>] ? skb_dequeue+0x18/0x61
[<ffffffff814c928f>] skb_dequeue+0x18/0x61
[<ffffffffa009aa92>] afs_deliver_to_call+0x344/0x39d [kafs]
[<ffffffffa009ab37>] afs_process_async_call+0x4c/0xd5 [kafs]
[<ffffffffa0099e9c>] afs_async_workfn+0xe/0x10 [kafs]
[<ffffffff81063a3a>] process_one_work+0x29d/0x57c
[<ffffffff81064ac2>] worker_thread+0x24a/0x385
[<ffffffff81064878>] ? rescuer_thread+0x2d0/0x2d0
[<ffffffff810696f5>] kthread+0xf3/0xfb
[<ffffffff8160a6ff>] ret_from_fork+0x1f/0x40
[<ffffffff81069602>] ? kthread_create_on_node+0x1cf/0x1cf
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-03 16:11:40 +03:00
{
rxrpc: Don't expose skbs to in-kernel users [ver #2]
Don't expose skbs to in-kernel users, such as the AFS filesystem, but
instead provide a notification hook the indicates that a call needs
attention and another that indicates that there's a new call to be
collected.
This makes the following possibilities more achievable:
(1) Call refcounting can be made simpler if skbs don't hold refs to calls.
(2) skbs referring to non-data events will be able to be freed much sooner
rather than being queued for AFS to pick up as rxrpc_kernel_recv_data
will be able to consult the call state.
(3) We can shortcut the receive phase when a call is remotely aborted
because we don't have to go through all the packets to get to the one
cancelling the operation.
(4) It makes it easier to do encryption/decryption directly between AFS's
buffers and sk_buffs.
(5) Encryption/decryption can more easily be done in the AFS's thread
contexts - usually that of the userspace process that issued a syscall
- rather than in one of rxrpc's background threads on a workqueue.
(6) AFS will be able to wait synchronously on a call inside AF_RXRPC.
To make this work, the following interface function has been added:
int rxrpc_kernel_recv_data(
struct socket *sock, struct rxrpc_call *call,
void *buffer, size_t bufsize, size_t *_offset,
bool want_more, u32 *_abort_code);
This is the recvmsg equivalent. It allows the caller to find out about the
state of a specific call and to transfer received data into a buffer
piecemeal.
afs_extract_data() and rxrpc_kernel_recv_data() now do all the extraction
logic between them. They don't wait synchronously yet because the socket
lock needs to be dealt with.
Five interface functions have been removed:
rxrpc_kernel_is_data_last()
rxrpc_kernel_get_abort_code()
rxrpc_kernel_get_error_number()
rxrpc_kernel_free_skb()
rxrpc_kernel_data_consumed()
As a temporary hack, sk_buffs going to an in-kernel call are queued on the
rxrpc_call struct (->knlrecv_queue) rather than being handed over to the
in-kernel user. To process the queue internally, a temporary function,
temp_deliver_data() has been added. This will be replaced with common code
between the rxrpc_recvmsg() path and the kernel_rxrpc_recv_data() path in a
future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-30 22:42:14 +03:00
return afs_extract_data ( call , call - > buffer , call - > reply_max , false ) ;
rxrpc: Fix races between skb free, ACK generation and replying
Inside the kafs filesystem it is possible to occasionally have a call
processed and terminated before we've had a chance to check whether we need
to clean up the rx queue for that call because afs_send_simple_reply() ends
the call when it is done, but this is done in a workqueue item that might
happen to run to completion before afs_deliver_to_call() completes.
Further, it is possible for rxrpc_kernel_send_data() to be called to send a
reply before the last request-phase data skb is released. The rxrpc skb
destructor is where the ACK processing is done and the call state is
advanced upon release of the last skb. ACK generation is also deferred to
a work item because it's possible that the skb destructor is not called in
a context where kernel_sendmsg() can be invoked.
To this end, the following changes are made:
(1) kernel_rxrpc_data_consumed() is added. This should be called whenever
an skb is emptied so as to crank the ACK and call states. This does
not release the skb, however. kernel_rxrpc_free_skb() must now be
called to achieve that. These together replace
rxrpc_kernel_data_delivered().
(2) kernel_rxrpc_data_consumed() is wrapped by afs_data_consumed().
This makes afs_deliver_to_call() easier to work as the skb can simply
be discarded unconditionally here without trying to work out what the
return value of the ->deliver() function means.
The ->deliver() functions can, via afs_data_complete(),
afs_transfer_reply() and afs_extract_data() mark that an skb has been
consumed (thereby cranking the state) without the need to
conditionally free the skb to make sure the state is correct on an
incoming call for when the call processor tries to send the reply.
(3) rxrpc_recvmsg() now has to call kernel_rxrpc_data_consumed() when it
has finished with a packet and MSG_PEEK isn't set.
(4) rxrpc_packet_destructor() no longer calls rxrpc_hard_ACK_data().
Because of this, we no longer need to clear the destructor and put the
call before we free the skb in cases where we don't want the ACK/call
state to be cranked.
(5) The ->deliver() call-type callbacks are made to return -EAGAIN rather
than 0 if they expect more data (afs_extract_data() returns -EAGAIN to
the delivery function already), and the caller is now responsible for
producing an abort if that was the last packet.
(6) There are many bits of unmarshalling code where:
ret = afs_extract_data(call, skb, last, ...);
switch (ret) {
case 0: break;
case -EAGAIN: return 0;
default: return ret;
}
is to be found. As -EAGAIN can now be passed back to the caller, we
now just return if ret < 0:
ret = afs_extract_data(call, skb, last, ...);
if (ret < 0)
return ret;
(7) Checks for trailing data and empty final data packets have been
consolidated as afs_data_complete(). So:
if (skb->len > 0)
return -EBADMSG;
if (!last)
return 0;
becomes:
ret = afs_data_complete(call, skb, last);
if (ret < 0)
return ret;
(8) afs_transfer_reply() now checks the amount of data it has against the
amount of data desired and the amount of data in the skb and returns
an error to induce an abort if we don't get exactly what we want.
Without these changes, the following oops can occasionally be observed,
particularly if some printks are inserted into the delivery path:
general protection fault: 0000 [#1] SMP
Modules linked in: kafs(E) af_rxrpc(E) [last unloaded: af_rxrpc]
CPU: 0 PID: 1305 Comm: kworker/u8:3 Tainted: G E 4.7.0-fsdevel+ #1303
Hardware name: ASUS All Series/H97-PLUS, BIOS 2306 10/09/2014
Workqueue: kafsd afs_async_workfn [kafs]
task: ffff88040be041c0 ti: ffff88040c070000 task.ti: ffff88040c070000
RIP: 0010:[<ffffffff8108fd3c>] [<ffffffff8108fd3c>] __lock_acquire+0xcf/0x15a1
RSP: 0018:ffff88040c073bc0 EFLAGS: 00010002
RAX: 6b6b6b6b6b6b6b6b RBX: 0000000000000000 RCX: ffff88040d29a710
RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff88040d29a710
RBP: ffff88040c073c70 R08: 0000000000000001 R09: 0000000000000001
R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000000
R13: 0000000000000000 R14: ffff88040be041c0 R15: ffffffff814c928f
FS: 0000000000000000(0000) GS:ffff88041fa00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fa4595f4750 CR3: 0000000001c14000 CR4: 00000000001406f0
Stack:
0000000000000006 000000000be04930 0000000000000000 ffff880400000000
ffff880400000000 ffffffff8108f847 ffff88040be041c0 ffffffff81050446
ffff8803fc08a920 ffff8803fc08a958 ffff88040be041c0 ffff88040c073c38
Call Trace:
[<ffffffff8108f847>] ? mark_held_locks+0x5e/0x74
[<ffffffff81050446>] ? __local_bh_enable_ip+0x9b/0xa1
[<ffffffff8108f9ca>] ? trace_hardirqs_on_caller+0x16d/0x189
[<ffffffff810915f4>] lock_acquire+0x122/0x1b6
[<ffffffff810915f4>] ? lock_acquire+0x122/0x1b6
[<ffffffff814c928f>] ? skb_dequeue+0x18/0x61
[<ffffffff81609dbf>] _raw_spin_lock_irqsave+0x35/0x49
[<ffffffff814c928f>] ? skb_dequeue+0x18/0x61
[<ffffffff814c928f>] skb_dequeue+0x18/0x61
[<ffffffffa009aa92>] afs_deliver_to_call+0x344/0x39d [kafs]
[<ffffffffa009ab37>] afs_process_async_call+0x4c/0xd5 [kafs]
[<ffffffffa0099e9c>] afs_async_workfn+0xe/0x10 [kafs]
[<ffffffff81063a3a>] process_one_work+0x29d/0x57c
[<ffffffff81064ac2>] worker_thread+0x24a/0x385
[<ffffffff81064878>] ? rescuer_thread+0x2d0/0x2d0
[<ffffffff810696f5>] kthread+0xf3/0xfb
[<ffffffff8160a6ff>] ret_from_fork+0x1f/0x40
[<ffffffff81069602>] ? kthread_create_on_node+0x1cf/0x1cf
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-08-03 16:11:40 +03:00
}
2007-04-27 02:57:07 +04:00
/*
* security . c
*/
extern void afs_clear_permits ( struct afs_vnode * ) ;
extern void afs_cache_permit ( struct afs_vnode * , struct key * , long ) ;
2007-05-09 13:33:45 +04:00
extern void afs_zap_permits ( struct rcu_head * ) ;
2007-04-27 02:57:07 +04:00
extern struct key * afs_request_key ( struct afs_cell * ) ;
2011-06-21 03:28:19 +04:00
extern int afs_permission ( struct inode * , int ) ;
2007-04-27 02:57:07 +04:00
2007-04-27 02:55:03 +04:00
/*
* server . c
*/
extern spinlock_t afs_server_peer_lock ;
2007-04-27 02:59:35 +04:00
/* Take a reference on an afs_server record; logs the usage count as seen
 * before the increment.
 * NOTE(review): S is evaluated twice - don't pass an expression with side
 * effects.
 */
# define afs_get_server(S) \
do { \
_debug ( " GET SERVER %d " , atomic_read ( & ( S ) - > usage ) ) ; \
atomic_inc ( & ( S ) - > usage ) ; \
} while ( 0 )
2007-04-27 02:55:03 +04:00
extern struct afs_server * afs_lookup_server ( struct afs_cell * ,
const struct in_addr * ) ;
2016-08-30 11:49:29 +03:00
extern struct afs_server * afs_find_server ( const struct sockaddr_rxrpc * ) ;
2007-04-27 02:55:03 +04:00
extern void afs_put_server ( struct afs_server * ) ;
extern void __exit afs_purge_servers ( void ) ;
2007-04-27 02:57:07 +04:00
/*
* super . c
*/
extern int afs_fs_init ( void ) ;
extern void afs_fs_exit ( void ) ;
2007-04-27 02:55:03 +04:00
/*
* vlclient . c
*/
2007-04-27 02:57:07 +04:00
extern int afs_vl_get_entry_by_name ( struct in_addr * , struct key * ,
const char * , struct afs_cache_vlocation * ,
2017-01-05 13:38:36 +03:00
bool ) ;
2007-04-27 02:57:07 +04:00
extern int afs_vl_get_entry_by_id ( struct in_addr * , struct key * ,
afs_volid_t , afs_voltype_t ,
2017-01-05 13:38:36 +03:00
struct afs_cache_vlocation * , bool ) ;
2007-04-27 02:55:03 +04:00
/*
* vlocation . c
*/
/* Take a reference on an afs_vlocation record (bumps ->usage). */
# define afs_get_vlocation(V) do { atomic_inc(&(V)->usage); } while(0)
extern int __init afs_vlocation_update_init ( void ) ;
extern struct afs_vlocation * afs_vlocation_lookup ( struct afs_cell * ,
2007-04-27 02:57:07 +04:00
struct key * ,
2007-04-27 02:55:03 +04:00
const char * , size_t ) ;
extern void afs_put_vlocation ( struct afs_vlocation * ) ;
2007-05-03 14:12:46 +04:00
extern void afs_vlocation_purge ( void ) ;
2007-04-27 02:55:03 +04:00
/*
* vnode . c
*/
/*
 * Get the AFS vnode wrapper that embeds the given VFS inode.
 */
static inline struct afs_vnode *AFS_FS_I(struct inode *inode)
{
	return container_of(inode, struct afs_vnode, vfs_inode);
}
static inline struct inode * AFS_VNODE_TO_I ( struct afs_vnode * vnode )
{
return & vnode - > vfs_inode ;
}
2007-04-27 02:59:35 +04:00
extern void afs_vnode_finalise_status_update ( struct afs_vnode * ,
struct afs_server * ) ;
2007-04-27 02:57:07 +04:00
extern int afs_vnode_fetch_status ( struct afs_vnode * , struct afs_vnode * ,
struct key * ) ;
extern int afs_vnode_fetch_data ( struct afs_vnode * , struct key * ,
2017-01-05 13:38:34 +03:00
struct afs_read * ) ;
2007-04-27 02:59:35 +04:00
extern int afs_vnode_create ( struct afs_vnode * , struct key * , const char * ,
umode_t , struct afs_fid * , struct afs_file_status * ,
struct afs_callback * , struct afs_server * * ) ;
extern int afs_vnode_remove ( struct afs_vnode * , struct key * , const char * ,
bool ) ;
extern int afs_vnode_link ( struct afs_vnode * , struct afs_vnode * , struct key * ,
const char * ) ;
extern int afs_vnode_symlink ( struct afs_vnode * , struct key * , const char * ,
const char * , struct afs_fid * ,
struct afs_file_status * , struct afs_server * * ) ;
extern int afs_vnode_rename ( struct afs_vnode * , struct afs_vnode * ,
struct key * , const char * , const char * ) ;
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
extern int afs_vnode_store_data ( struct afs_writeback * , pgoff_t , pgoff_t ,
unsigned , unsigned ) ;
extern int afs_vnode_setattr ( struct afs_vnode * , struct key * , struct iattr * ) ;
2007-05-11 09:22:20 +04:00
extern int afs_vnode_get_volume_status ( struct afs_vnode * , struct key * ,
struct afs_volume_status * ) ;
2007-07-16 10:40:12 +04:00
extern int afs_vnode_set_lock ( struct afs_vnode * , struct key * ,
afs_lock_type_t ) ;
extern int afs_vnode_extend_lock ( struct afs_vnode * , struct key * ) ;
extern int afs_vnode_release_lock ( struct afs_vnode * , struct key * ) ;
2007-04-27 02:55:03 +04:00
/*
* volume . c
*/
/* Take a reference on an afs_volume record (bumps ->usage). */
# define afs_get_volume(V) do { atomic_inc(&(V)->usage); } while(0)
extern void afs_put_volume ( struct afs_volume * ) ;
2007-04-27 02:57:07 +04:00
extern struct afs_volume * afs_volume_lookup ( struct afs_mount_params * ) ;
2007-04-27 02:55:03 +04:00
extern struct afs_server * afs_volume_pick_fileserver ( struct afs_vnode * ) ;
extern int afs_volume_release_fileserver ( struct afs_vnode * ,
struct afs_server * , int ) ;
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
/*
* write . c
*/
extern int afs_set_page_dirty ( struct page * ) ;
extern void afs_put_writeback ( struct afs_writeback * ) ;
2008-10-16 09:04:32 +04:00
extern int afs_write_begin ( struct file * file , struct address_space * mapping ,
loff_t pos , unsigned len , unsigned flags ,
struct page * * pagep , void * * fsdata ) ;
extern int afs_write_end ( struct file * file , struct address_space * mapping ,
loff_t pos , unsigned len , unsigned copied ,
struct page * page , void * fsdata ) ;
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
extern int afs_writepage ( struct page * , struct writeback_control * ) ;
extern int afs_writepages ( struct address_space * , struct writeback_control * ) ;
extern void afs_pages_written_back ( struct afs_vnode * , struct afs_call * ) ;
2014-04-03 22:13:46 +04:00
extern ssize_t afs_file_write ( struct kiocb * , struct iov_iter * ) ;
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
extern int afs_writeback_all ( struct afs_vnode * ) ;
2011-07-17 04:44:56 +04:00
extern int afs_fsync ( struct file * , loff_t , loff_t , int ) ;
AFS: implement basic file write support
Implement support for writing to regular AFS files, including:
(1) write
(2) truncate
(3) fsync, fdatasync
(4) chmod, chown, chgrp, utime.
AFS writeback attempts to batch writes into chunks as large as it can manage
up to the point that it writes back 65535 pages in one chunk or it meets a
locked page.
Furthermore, if a page has been written to using a particular key, then should
another write to that page use some other key, the first write will be flushed
before the second is allowed to take place. If the first write fails due to a
security error, then the page will be scrapped and reread before the second
write takes place.
If a page is dirty and the callback on it is broken by the server, then the
dirty data is not discarded (same behaviour as NFS).
Shared-writable mappings are not supported by this patch.
[akpm@linux-foundation.org: fix a bunch of warnings]
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-09 13:33:46 +04:00
2007-04-27 02:55:03 +04:00
/*****************************************************************************/
/*
* debug tracing
*/
2017-01-05 13:38:34 +03:00
# include <trace/events/afs.h>
2007-04-27 02:55:03 +04:00
/* Runtime debug-tracing mask; bits select which trace classes print (see
 * the AFS_DEBUG_* flags below).
 */
extern unsigned afs_debug ;
/* Core debug printer: prefixes every message with the current task's comm. */
# define dbgprintk(FMT,...) \
2008-04-03 13:44:01 +04:00
printk ( " [%-6.6s] " FMT " \n " , current - > comm , # # __VA_ARGS__ )
2007-04-27 02:55:03 +04:00
2008-04-30 11:55:09 +04:00
/* Unconditional function entry/exit trace helpers. */
# define kenter(FMT,...) dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
# define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
2007-04-27 02:55:03 +04:00
# define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__)
/* __KDEBUG: tracing compiled in unconditionally. */
# if defined(__KDEBUG)
# define _enter(FMT,...) kenter(FMT,##__VA_ARGS__)
# define _leave(FMT,...) kleave(FMT,##__VA_ARGS__)
# define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__)
/* CONFIG_AFS_DEBUG: tracing gated at runtime by bits in afs_debug. */
# elif defined(CONFIG_AFS_DEBUG)
# define AFS_DEBUG_KENTER 0x01
# define AFS_DEBUG_KLEAVE 0x02
# define AFS_DEBUG_KDEBUG 0x04
# define _enter(FMT,...) \
do { \
if ( unlikely ( afs_debug & AFS_DEBUG_KENTER ) ) \
kenter ( FMT , # # __VA_ARGS__ ) ; \
} while ( 0 )
# define _leave(FMT,...) \
do { \
if ( unlikely ( afs_debug & AFS_DEBUG_KLEAVE ) ) \
kleave ( FMT , # # __VA_ARGS__ ) ; \
} while ( 0 )
# define _debug(FMT,...) \
do { \
if ( unlikely ( afs_debug & AFS_DEBUG_KDEBUG ) ) \
kdebug ( FMT , # # __VA_ARGS__ ) ; \
} while ( 0 )
/* Tracing disabled: no_printk() keeps format-string type-checking while
 * generating no code.
 */
# else
2010-08-12 19:54:57 +04:00
# define _enter(FMT,...) no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
# define _leave(FMT,...) no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
# define _debug(FMT,...) no_printk(" "FMT ,##__VA_ARGS__)
2007-04-27 02:55:03 +04:00
# endif
/*
* debug assertion checking
*/
/* Assertions are currently force-enabled (note the "#if 1"); the intended
 * gate is __KDEBUGALL.
 */
# if 1 // defined(__KDEBUGALL)
/* BUG() if the expression X evaluates false. */
# define ASSERT(X) \
do { \
if ( unlikely ( ! ( X ) ) ) { \
printk ( KERN_ERR " \n " ) ; \
printk ( KERN_ERR " AFS: Assertion failed \n " ) ; \
BUG ( ) ; \
} \
} while ( 0 )
/* BUG() unless (X OP Y) holds; prints both values in decimal and hex.
 * NOTE(review): values are cast to unsigned long for printing, so wider
 * types may be shown truncated.
 */
# define ASSERTCMP(X, OP, Y) \
do { \
if ( unlikely ( ! ( ( X ) OP ( Y ) ) ) ) { \
printk ( KERN_ERR " \n " ) ; \
printk ( KERN_ERR " AFS: Assertion failed \n " ) ; \
printk ( KERN_ERR " %lu " # OP " %lu is false \n " , \
( unsigned long ) ( X ) , ( unsigned long ) ( Y ) ) ; \
printk ( KERN_ERR " 0x%lx " # OP " 0x%lx is false \n " , \
( unsigned long ) ( X ) , ( unsigned long ) ( Y ) ) ; \
BUG ( ) ; \
} \
} while ( 0 )
2007-05-09 13:33:45 +04:00
/* BUG() unless N lies in the range bounded by L and H, i.e. both
 * (L OP1 N) and (N OP2 H) hold; prints all three values in decimal and hex.
 */
# define ASSERTRANGE(L, OP1, N, OP2, H) \
do { \
if ( unlikely ( ! ( ( L ) OP1 ( N ) ) | | ! ( ( N ) OP2 ( H ) ) ) ) { \
printk ( KERN_ERR " \n " ) ; \
printk ( KERN_ERR " AFS: Assertion failed \n " ) ; \
printk ( KERN_ERR " %lu " # OP1 " %lu " # OP2 " %lu is false \n " , \
( unsigned long ) ( L ) , ( unsigned long ) ( N ) , \
( unsigned long ) ( H ) ) ; \
printk ( KERN_ERR " 0x%lx " # OP1 " 0x%lx " # OP2 " 0x%lx is false \n " , \
( unsigned long ) ( L ) , ( unsigned long ) ( N ) , \
( unsigned long ) ( H ) ) ; \
BUG ( ) ; \
} \
} while ( 0 )
2007-04-27 02:55:03 +04:00
/* Conditional assertion: BUG() if condition C holds but X is false. */
# define ASSERTIF(C, X) \
do { \
if ( unlikely ( ( C ) & & ! ( X ) ) ) { \
printk ( KERN_ERR " \n " ) ; \
printk ( KERN_ERR " AFS: Assertion failed \n " ) ; \
BUG ( ) ; \
} \
} while ( 0 )
/* Conditional comparison assertion: BUG() if C holds but (X OP Y) does not;
 * prints both values in decimal and hex.
 */
# define ASSERTIFCMP(C, X, OP, Y) \
do { \
if ( unlikely ( ( C ) & & ! ( ( X ) OP ( Y ) ) ) ) { \
printk ( KERN_ERR " \n " ) ; \
printk ( KERN_ERR " AFS: Assertion failed \n " ) ; \
printk ( KERN_ERR " %lu " # OP " %lu is false \n " , \
( unsigned long ) ( X ) , ( unsigned long ) ( Y ) ) ; \
printk ( KERN_ERR " 0x%lx " # OP " 0x%lx is false \n " , \
( unsigned long ) ( X ) , ( unsigned long ) ( Y ) ) ; \
BUG ( ) ; \
} \
} while ( 0 )
# else
/* Assertions compiled out: each macro expands to an empty statement so the
 * arguments are never evaluated.
 */
# define ASSERT(X) \
do { \
} while ( 0 )
# define ASSERTCMP(X, OP, Y) \
do { \
} while ( 0 )
2007-05-09 13:33:45 +04:00
# define ASSERTRANGE(L, OP1, N, OP2, H) \
do { \
} while ( 0 )
2007-04-27 02:55:03 +04:00
# define ASSERTIF(C, X) \
do { \
} while ( 0 )
# define ASSERTIFCMP(C, X, OP, Y) \
do { \
} while ( 0 )
# endif /* __KDEBUGALL */