/*
 * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
 *
 * This software may be freely redistributed under the terms of the
 * GNU General Public License.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *          David Howells <dhowells@redhat.com>
 *
 */
# include <linux/kernel.h>
# include <linux/module.h>
# include <linux/init.h>
2007-04-27 02:55:03 +04:00
# include <linux/circ_buf.h>
Detach sched.h from mm.h
First thing mm.h does is including sched.h solely for can_do_mlock() inline
function which has "current" dereference inside. By dealing with can_do_mlock()
mm.h can be detached from sched.h which is good. See below, why.
This patch
a) removes unconditional inclusion of sched.h from mm.h
b) makes can_do_mlock() normal function in mm/mlock.c
c) exports can_do_mlock() to not break compilation
d) adds sched.h inclusions back to files that were getting it indirectly.
e) adds less bloated headers to some files (asm/signal.h, jiffies.h) that were
getting them indirectly
Net result is:
a) mm.h users would get less code to open, read, preprocess, parse, ... if
they don't need sched.h
b) sched.h stops being dependency for significant number of files:
on x86_64 allmodconfig touching sched.h results in recompile of 4083 files,
after patch it's only 3744 (-8.3%).
Cross-compile tested on
all arm defconfigs, all mips defconfigs, all powerpc defconfigs,
alpha alpha-up
arm
i386 i386-up i386-defconfig i386-allnoconfig
ia64 ia64-up
m68k
mips
parisc parisc-up
powerpc powerpc-up
s390 s390-up
sparc sparc-up
sparc64 sparc64-up
um-x86_64
x86_64 x86_64-up x86_64-defconfig x86_64-allnoconfig
as well as my two usual configs.
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-21 01:22:52 +04:00
# include <linux/sched.h>
2005-04-17 02:20:36 +04:00
# include "internal.h"
2007-04-27 02:55:03 +04:00
2007-10-17 10:26:41 +04:00
#if 0
unsigned afs_vnode_update_timeout = 10;
#endif  /*  0  */

/* Number of free slots in a server's deferred callback-break ring buffer
 * (cb_break is a power-of-two sized circular buffer). */
#define afs_breakring_space(server) \
	CIRC_SPACE((server)->cb_break_head, (server)->cb_break_tail,	\
		   ARRAY_SIZE((server)->cb_break))

//static void afs_callback_updater(struct work_struct *);

/* ordered workqueue on which callback-break and update work items run */
static struct workqueue_struct *afs_callback_update_worker;
2005-04-17 02:20:36 +04:00
/*
* allow the fileserver to request callback state ( re - ) initialisation
*/
2007-04-27 02:55:03 +04:00
void afs_init_callback_state ( struct afs_server * server )
2005-04-17 02:20:36 +04:00
{
2007-04-27 02:55:03 +04:00
struct afs_vnode * vnode ;
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
_enter ( " {%p} " , server ) ;
2005-04-17 02:20:36 +04:00
spin_lock ( & server - > cb_lock ) ;
2007-04-27 02:55:03 +04:00
/* kill all the promises on record from this server */
while ( ! RB_EMPTY_ROOT ( & server - > cb_promises ) ) {
vnode = rb_entry ( server - > cb_promises . rb_node ,
struct afs_vnode , cb_promise ) ;
2007-05-09 13:33:45 +04:00
_debug ( " UNPROMISE { vid=%x:%u uq=%u} " ,
2007-04-27 02:59:35 +04:00
vnode - > fid . vid , vnode - > fid . vnode , vnode - > fid . unique ) ;
2007-04-27 02:55:03 +04:00
rb_erase ( & vnode - > cb_promise , & server - > cb_promises ) ;
vnode - > cb_promised = false ;
}
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
spin_unlock ( & server - > cb_lock ) ;
_leave ( " " ) ;
}
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
/*
* handle the data invalidation side of a callback being broken
*/
void afs_broken_callback_work ( struct work_struct * work )
{
struct afs_vnode * vnode =
container_of ( work , struct afs_vnode , cb_broken_work ) ;
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
_enter ( " " ) ;
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
if ( test_bit ( AFS_VNODE_DELETED , & vnode - > flags ) )
return ;
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
/* we're only interested in dealing with a broken callback on *this*
* vnode and only if no - one else has dealt with it yet */
2007-04-27 02:59:35 +04:00
if ( ! mutex_trylock ( & vnode - > validate_lock ) )
2007-04-27 02:55:03 +04:00
return ; /* someone else is dealing with it */
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
if ( test_bit ( AFS_VNODE_CB_BROKEN , & vnode - > flags ) ) {
2007-04-27 02:57:07 +04:00
if ( S_ISDIR ( vnode - > vfs_inode . i_mode ) )
afs_clear_permits ( vnode ) ;
if ( afs_vnode_fetch_status ( vnode , NULL , NULL ) < 0 )
2007-04-27 02:55:03 +04:00
goto out ;
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
if ( test_bit ( AFS_VNODE_DELETED , & vnode - > flags ) )
goto out ;
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
/* if the vnode's data version number changed then its contents
* are different */
2007-05-09 13:33:45 +04:00
if ( test_and_clear_bit ( AFS_VNODE_ZAP_DATA , & vnode - > flags ) )
afs_zap_data ( vnode ) ;
2005-04-17 02:20:36 +04:00
}
2007-04-27 02:55:03 +04:00
out :
2007-04-27 02:59:35 +04:00
mutex_unlock ( & vnode - > validate_lock ) ;
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
/* avoid the potential race whereby the mutex_trylock() in this
* function happens again between the clear_bit ( ) and the
* mutex_unlock ( ) */
if ( test_bit ( AFS_VNODE_CB_BROKEN , & vnode - > flags ) ) {
_debug ( " requeue " ) ;
queue_work ( afs_callback_update_worker , & vnode - > cb_broken_work ) ;
}
_leave ( " " ) ;
}
/*
* actually break a callback
*/
static void afs_break_callback ( struct afs_server * server ,
struct afs_vnode * vnode )
{
_enter ( " " ) ;
set_bit ( AFS_VNODE_CB_BROKEN , & vnode - > flags ) ;
if ( vnode - > cb_promised ) {
spin_lock ( & vnode - > lock ) ;
_debug ( " break callback " ) ;
spin_lock ( & server - > cb_lock ) ;
if ( vnode - > cb_promised ) {
rb_erase ( & vnode - > cb_promise , & server - > cb_promises ) ;
vnode - > cb_promised = false ;
}
spin_unlock ( & server - > cb_lock ) ;
queue_work ( afs_callback_update_worker , & vnode - > cb_broken_work ) ;
2007-07-16 10:40:12 +04:00
if ( list_empty ( & vnode - > granted_locks ) & &
! list_empty ( & vnode - > pending_locks ) )
afs_lock_may_be_available ( vnode ) ;
2007-04-27 02:55:03 +04:00
spin_unlock ( & vnode - > lock ) ;
}
}
/*
* allow the fileserver to explicitly break one callback
* - happens when
* - the backing file is changed
* - a lock is released
*/
static void afs_break_one_callback ( struct afs_server * server ,
struct afs_fid * fid )
{
struct afs_vnode * vnode ;
struct rb_node * p ;
_debug ( " find " ) ;
spin_lock ( & server - > fs_lock ) ;
p = server - > fs_vnodes . rb_node ;
while ( p ) {
vnode = rb_entry ( p , struct afs_vnode , server_rb ) ;
if ( fid - > vid < vnode - > fid . vid )
p = p - > rb_left ;
else if ( fid - > vid > vnode - > fid . vid )
p = p - > rb_right ;
else if ( fid - > vnode < vnode - > fid . vnode )
p = p - > rb_left ;
else if ( fid - > vnode > vnode - > fid . vnode )
p = p - > rb_right ;
else if ( fid - > unique < vnode - > fid . unique )
p = p - > rb_left ;
else if ( fid - > unique > vnode - > fid . unique )
p = p - > rb_right ;
else
goto found ;
}
/* not found so we just ignore it (it may have moved to another
* server ) */
not_available :
_debug ( " not avail " ) ;
spin_unlock ( & server - > fs_lock ) ;
_leave ( " " ) ;
return ;
found :
_debug ( " found " ) ;
ASSERTCMP ( server , = = , vnode - > server ) ;
if ( ! igrab ( AFS_VNODE_TO_I ( vnode ) ) )
goto not_available ;
spin_unlock ( & server - > fs_lock ) ;
afs_break_callback ( server , vnode ) ;
iput ( & vnode - > vfs_inode ) ;
_leave ( " " ) ;
2007-04-27 02:49:28 +04:00
}
2005-04-17 02:20:36 +04:00
/*
* allow the fileserver to break callback promises
*/
2007-04-27 02:55:03 +04:00
void afs_break_callbacks ( struct afs_server * server , size_t count ,
struct afs_callback callbacks [ ] )
2005-04-17 02:20:36 +04:00
{
2007-04-27 02:55:03 +04:00
_enter ( " %p,%zu, " , server , count ) ;
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
ASSERT ( server ! = NULL ) ;
ASSERTCMP ( count , < = , AFSCBMAX ) ;
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
for ( ; count > 0 ; callbacks + + , count - - ) {
2005-04-17 02:20:36 +04:00
_debug ( " - Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u } " ,
callbacks - > fid . vid ,
callbacks - > fid . vnode ,
callbacks - > fid . unique ,
callbacks - > version ,
callbacks - > expiry ,
callbacks - > type
) ;
2007-04-27 02:55:03 +04:00
afs_break_one_callback ( server , & callbacks - > fid ) ;
}
_leave ( " " ) ;
return ;
}
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
/*
* record the callback for breaking
* - the caller must hold server - > cb_lock
*/
static void afs_do_give_up_callback ( struct afs_server * server ,
struct afs_vnode * vnode )
{
struct afs_callback * cb ;
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
_enter ( " %p,%p " , server , vnode ) ;
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
cb = & server - > cb_break [ server - > cb_break_head ] ;
cb - > fid = vnode - > fid ;
cb - > version = vnode - > cb_version ;
cb - > expiry = vnode - > cb_expiry ;
cb - > type = vnode - > cb_type ;
smp_wmb ( ) ;
server - > cb_break_head =
( server - > cb_break_head + 1 ) &
( ARRAY_SIZE ( server - > cb_break ) - 1 ) ;
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
/* defer the breaking of callbacks to try and collect as many as
* possible to ship in one operation */
switch ( atomic_inc_return ( & server - > cb_break_n ) ) {
case 1 . . . AFSCBMAX - 1 :
queue_delayed_work ( afs_callback_update_worker ,
& server - > cb_break_work , HZ * 2 ) ;
break ;
case AFSCBMAX :
afs_flush_callback_breaks ( server ) ;
break ;
default :
break ;
}
ASSERT ( server - > cb_promises . rb_node ! = NULL ) ;
rb_erase ( & vnode - > cb_promise , & server - > cb_promises ) ;
vnode - > cb_promised = false ;
_leave ( " " ) ;
}
2007-04-27 02:59:35 +04:00
/*
* discard the callback on a deleted item
*/
void afs_discard_callback_on_delete ( struct afs_vnode * vnode )
{
struct afs_server * server = vnode - > server ;
_enter ( " %d " , vnode - > cb_promised ) ;
if ( ! vnode - > cb_promised ) {
_leave ( " [not promised] " ) ;
return ;
}
ASSERT ( server ! = NULL ) ;
spin_lock ( & server - > cb_lock ) ;
if ( vnode - > cb_promised ) {
ASSERT ( server - > cb_promises . rb_node ! = NULL ) ;
rb_erase ( & vnode - > cb_promise , & server - > cb_promises ) ;
vnode - > cb_promised = false ;
}
spin_unlock ( & server - > cb_lock ) ;
_leave ( " " ) ;
}
2007-04-27 02:55:03 +04:00
/*
* give up the callback registered for a vnode on the file server when the
* inode is being cleared
*/
void afs_give_up_callback ( struct afs_vnode * vnode )
{
struct afs_server * server = vnode - > server ;
DECLARE_WAITQUEUE ( myself , current ) ;
_enter ( " %d " , vnode - > cb_promised ) ;
_debug ( " GIVE UP INODE %p " , & vnode - > vfs_inode ) ;
if ( ! vnode - > cb_promised ) {
_leave ( " [not promised] " ) ;
return ;
}
ASSERT ( server ! = NULL ) ;
spin_lock ( & server - > cb_lock ) ;
if ( vnode - > cb_promised & & afs_breakring_space ( server ) = = 0 ) {
add_wait_queue ( & server - > cb_break_waitq , & myself ) ;
for ( ; ; ) {
set_current_state ( TASK_UNINTERRUPTIBLE ) ;
if ( ! vnode - > cb_promised | |
afs_breakring_space ( server ) ! = 0 )
break ;
spin_unlock ( & server - > cb_lock ) ;
schedule ( ) ;
spin_lock ( & server - > cb_lock ) ;
2005-04-17 02:20:36 +04:00
}
2007-04-27 02:55:03 +04:00
remove_wait_queue ( & server - > cb_break_waitq , & myself ) ;
__set_current_state ( TASK_RUNNING ) ;
}
/* of course, it's always possible for the server to break this vnode's
* callback first . . . */
if ( vnode - > cb_promised )
afs_do_give_up_callback ( server , vnode ) ;
spin_unlock ( & server - > cb_lock ) ;
_leave ( " " ) ;
}
/*
 * Dispatch a deferred give-up-callbacks operation.
 *
 * Work-item handler for server->cb_break_work: ships the accumulated
 * callback breaks to the fileserver asynchronously.
 */
void afs_dispatch_give_up_callbacks(struct work_struct *work)
{
	struct afs_server *server =
		container_of(work, struct afs_server, cb_break_work.work);

	_enter("");

	/* tell the fileserver to discard the callback promises it has
	 * - in the event of ENOMEM or some other error, we just forget that we
	 *   had callbacks entirely, and the server will call us later to break
	 *   them
	 */
	afs_fs_give_up_callbacks(server, &afs_async_call);
}
/*
* flush the outstanding callback breaks on a server
*/
void afs_flush_callback_breaks ( struct afs_server * server )
{
2012-08-03 21:30:47 +04:00
mod_delayed_work ( afs_callback_update_worker , & server - > cb_break_work , 0 ) ;
2007-04-27 02:55:03 +04:00
}
#if 0
/*
 * Update a bunch of callbacks.
 *
 * NOTE(review): dead code, compiled out.  It references identifiers that
 * are not defined here (vl, vldb, afs_vnode_update_worker,
 * afs_vnode_update) and would not compile if enabled — it appears to have
 * been adapted from the volume-location updater.  Kept as a sketch only.
 */
static void afs_callback_updater(struct work_struct *work)
{
	struct afs_server *server;
	struct afs_vnode *vnode, *xvnode;
	time_t now;
	long timeout;
	int ret;

	server = container_of(work, struct afs_server, updater);

	_enter("");

	now = get_seconds();

	/* find the first vnode to update */
	spin_lock(&server->cb_lock);
	for (;;) {
		if (RB_EMPTY_ROOT(&server->cb_promises)) {
			spin_unlock(&server->cb_lock);
			_leave(" [nothing]");
			return;
		}

		vnode = rb_entry(rb_first(&server->cb_promises),
				 struct afs_vnode, cb_promise);
		if (atomic_read(&vnode->usage) > 0)
			break;
		rb_erase(&vnode->cb_promise, &server->cb_promises);
		vnode->cb_promised = false;
	}

	timeout = vnode->update_at - now;
	if (timeout > 0) {
		queue_delayed_work(afs_vnode_update_worker,
				   &afs_vnode_update, timeout * HZ);
		spin_unlock(&server->cb_lock);
		_leave(" [nothing]");
		return;
	}

	list_del_init(&vnode->update);
	atomic_inc(&vnode->usage);
	spin_unlock(&server->cb_lock);

	/* we can now perform the update */
	_debug("update %s", vnode->vldb.name);
	vnode->state = AFS_VL_UPDATING;
	vnode->upd_rej_cnt = 0;
	vnode->upd_busy_cnt = 0;

	ret = afs_vnode_update_record(vl, &vldb);
	switch (ret) {
	case 0:
		afs_vnode_apply_update(vl, &vldb);
		vnode->state = AFS_VL_UPDATING;
		break;
	case -ENOMEDIUM:
		vnode->state = AFS_VL_VOLUME_DELETED;
		break;
	default:
		vnode->state = AFS_VL_UNCERTAIN;
		break;
	}

	/* and then reschedule */
	_debug("reschedule");
	vnode->update_at = get_seconds() + afs_vnode_update_timeout;

	spin_lock(&server->cb_lock);

	if (!list_empty(&server->cb_promises)) {
		/* next update in 10 minutes, but wait at least 1 second more
		 * than the newest record already queued so that we don't spam
		 * the VL server suddenly with lots of requests
		 */
		xvnode = list_entry(server->cb_promises.prev,
				    struct afs_vnode, update);
		if (vnode->update_at <= xvnode->update_at)
			vnode->update_at = xvnode->update_at + 1;
		xvnode = list_entry(server->cb_promises.next,
				    struct afs_vnode, update);
		timeout = xvnode->update_at - now;
		if (timeout < 0)
			timeout = 0;
	} else {
		timeout = afs_vnode_update_timeout;
	}

	list_add_tail(&vnode->update, &server->cb_promises);

	_debug("timeout %ld", timeout);
	queue_delayed_work(afs_vnode_update_worker,
			   &afs_vnode_update, timeout * HZ);
	spin_unlock(&server->cb_lock);
	afs_put_vnode(vl);
}
#endif
/*
 * Initialise the callback update process.
 *
 * Creates the ordered "kafs_callbackd" workqueue (WQ_MEM_RECLAIM as it may
 * be needed on the memory-reclaim path).
 *
 * Returns 0 on success or -ENOMEM if the workqueue cannot be allocated.
 */
int __init afs_callback_update_init(void)
{
	afs_callback_update_worker = alloc_ordered_workqueue("kafs_callbackd",
							     WQ_MEM_RECLAIM);
	return afs_callback_update_worker ? 0 : -ENOMEM;
}
2005-04-17 02:20:36 +04:00
/*
2007-04-27 02:55:03 +04:00
* shut down the callback update process
2005-04-17 02:20:36 +04:00
*/
2007-05-03 14:12:46 +04:00
void afs_callback_update_kill ( void )
2005-04-17 02:20:36 +04:00
{
2007-04-27 02:55:03 +04:00
destroy_workqueue ( afs_callback_update_worker ) ;
2007-04-27 02:49:28 +04:00
}