/*
 * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
 *
 * This software may be freely redistributed under the terms of the
 * GNU General Public License.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 *          David Howells <dhowells@redhat.com>
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/circ_buf.h>
/*
 * NOTE: <linux/sched.h> is included explicitly below because mm.h no longer
 * pulls it in unconditionally ("Detach sched.h from mm.h": can_do_mlock()
 * was made a normal function in mm/mlock.c so mm.h could drop its sched.h
 * inclusion; files that previously got sched.h indirectly must now include
 * it themselves).
 */
#include <linux/sched.h>
#include "internal.h"
/*
 * Allow the fileserver to request callback state (re-)initialisation.
 * Unfortunately, UUIDs are not guaranteed unique.
 */
void afs_init_callback_state(struct afs_server *server)
{
	rcu_read_lock();
	do {
		/* Bumping cb_s_break invalidates the callback promises
		 * associated with this server record.
		 */
		server->cb_s_break++;
		server = rcu_dereference(server->uuid_next);
	} while (0);
	/* NOTE(review): despite advancing along ->uuid_next (and the header
	 * comment about UUIDs not being unique), the "while (0)" means only
	 * the first server in the chain gets its break counter bumped —
	 * confirm this is intentional.
	 */
	rcu_read_unlock();
}
/*
 * Actually break a callback: discard the callback promise held on a vnode,
 * drop cached permits and kick anything waiting on the callback.
 *
 * Callers (e.g. afs_break_callback()) hold vnode->cb_lock write-locked.
 */
void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason)
{
	_enter("");

	clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
		/* The promise is now stale: bump the break counter so that
		 * in-flight operations can detect the change, and drop any
		 * permits cached under the old callback state.
		 */
		vnode->cb_break++;
		afs_clear_permits(vnode);

		/* A broken callback may mean a file lock we were waiting for
		 * has become available.
		 */
		if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
			afs_lock_may_be_available(vnode);

		trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
	} else {
		/* No promise was outstanding; trace the break anyway. */
		trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, false);
	}
}
2017-11-02 18:27:49 +03:00
2019-06-20 20:12:16 +03:00
/*
 * Break a callback promise on a vnode.
 *
 * Takes vnode->cb_lock for writing to serialise the break against seqlock
 * readers of the callback state, then defers to __afs_break_callback().
 */
void afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason)
{
	write_seqlock(&vnode->cb_lock);
	__afs_break_callback(vnode, reason);
	write_sequnlock(&vnode->cb_lock);
}
2020-03-27 18:02:44 +03:00
/*
 * Look up a volume by volume ID under RCU conditions.
 *
 * Returns the matching volume, or NULL if no volume with that ID exists in
 * the cell's tree.  The caller must be in an RCU read-side critical section.
 */
static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell,
						afs_volid_t vid)
{
	struct afs_volume *volume = NULL;
	struct rb_node *p;
	int seq = 0;

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		read_seqbegin_or_lock(&cell->volume_lock, &seq);

		p = rcu_dereference_raw(cell->volumes.rb_node);
		while (p) {
			volume = rb_entry(p, struct afs_volume, cell_node);

			/* NOTE(review): descent is mirrored relative to the
			 * usual "smaller keys to the left" convention —
			 * presumably matching the order used by the insert
			 * path for cell->volumes; verify against it.
			 */
			if (volume->vid < vid)
				p = rcu_dereference_raw(p->rb_left);
			else if (volume->vid > vid)
				p = rcu_dereference_raw(p->rb_right);
			else
				break;
			/* Reset so a failed search returns NULL rather than
			 * the last node visited.
			 */
			volume = NULL;
		}

	} while (need_seqretry(&cell->volume_lock, seq));

	done_seqretry(&cell->volume_lock, seq);
	return volume;
}
2007-04-27 02:55:03 +04:00
/*
 * Allow the fileserver to explicitly break one callback.
 * - happens when
 *   - the backing file is changed
 *   - a lock is released
 *
 * A fid with vnode == 0 and unique == 0 denotes a break of the entire
 * volume's callback state.  Called under the RCU read lock (the ->sb
 * dereference relies on it).
 */
static void afs_break_one_callback(struct afs_volume *volume,
				   struct afs_fid *fid)
{
	struct super_block *sb;
	struct afs_vnode *vnode;
	struct inode *inode;

	if (fid->vnode == 0 && fid->unique == 0) {
		/* The callback break applies to an entire volume. */
		write_lock(&volume->cb_v_break_lock);
		volume->cb_v_break++;
		trace_afs_cb_break(fid, volume->cb_v_break,
				   afs_cb_break_for_volume_callback, false);
		write_unlock(&volume->cb_v_break_lock);
		return;
	}

	/* See if we can find a matching inode - even an I_NEW inode needs to
	 * be marked as it can have its callback broken before we finish
	 * setting up the local inode.
	 */
	sb = rcu_dereference(volume->sb);
	if (!sb)
		return;
	inode = find_inode_rcu(sb, fid->vnode, afs_ilookup5_test_by_fid, fid);
	if (inode) {
		vnode = AFS_FS_I(inode);
		afs_break_callback(vnode, afs_cb_break_for_callback);
	} else {
		/* No local inode matched; record the miss for tracing. */
		trace_afs_cb_miss(fid, afs_cb_break_for_callback);
	}
}
2007-04-27 02:55:03 +04:00
2020-03-27 18:02:44 +03:00
/*
 * Break all the callbacks in the batch that belong to the same volume as the
 * first entry, in a single volume lookup.
 *
 * Matching entries are consumed (*_count is decremented for each); entries
 * for other volumes are compacted to the front of the array so the caller
 * can make another pass.  Called under the RCU read lock.
 */
static void afs_break_some_callbacks(struct afs_server *server,
				     struct afs_callback_break *cbb,
				     size_t *_count)
{
	struct afs_callback_break *residue = cbb;
	struct afs_volume *volume;
	afs_volid_t vid = cbb->fid.vid;
	size_t i;

	volume = afs_lookup_volume_rcu(server->cell, vid);

	/* TODO: Find all matching volumes if we couldn't match the server and
	 * break them anyway.
	 */
	for (i = *_count; i > 0; cbb++, i--) {
		if (cbb->fid.vid == vid) {
			_debug("- Fid { vl=%08llx n=%llu u=%u }",
			       cbb->fid.vid,
			       cbb->fid.vnode,
			       cbb->fid.unique);
			--*_count;
			/* If the volume wasn't found the entry is still
			 * consumed - there's nothing local to break.
			 */
			if (volume)
				afs_break_one_callback(volume, &cbb->fid);
		} else {
			/* Defer entries for other volumes to the next pass. */
			*residue++ = *cbb;
		}
	}
}
2005-04-17 02:20:36 +04:00
/*
* allow the fileserver to break callback promises
*/
2007-04-27 02:55:03 +04:00
void afs_break_callbacks ( struct afs_server * server , size_t count ,
2018-04-09 23:12:31 +03:00
struct afs_callback_break * callbacks )
2005-04-17 02:20:36 +04:00
{
2007-04-27 02:55:03 +04:00
_enter ( " %p,%zu, " , server , count ) ;
2005-04-17 02:20:36 +04:00
2007-04-27 02:55:03 +04:00
ASSERT ( server ! = NULL ) ;
2005-04-17 02:20:36 +04:00
2020-03-27 18:02:44 +03:00
rcu_read_lock ( ) ;
2018-05-13 00:31:33 +03:00
2020-03-27 18:02:44 +03:00
while ( count > 0 )
afs_break_some_callbacks ( server , callbacks , & count ) ;
2007-04-27 02:55:03 +04:00
2020-03-27 18:02:44 +03:00
rcu_read_unlock ( ) ;
2007-04-27 02:55:03 +04:00
return ;
}