/* file.c: AFS filesystem file handling
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include "volume.h"
#include "vnode.h"
#include <rxrpc/call.h>
#include "internal.h"

#if 0
static int afs_file_open(struct inode *inode, struct file *file);
static int afs_file_release(struct inode *inode, struct file *file);
#endif

static int afs_file_readpage(struct file *file, struct page *page);
static int afs_file_invalidatepage(struct page *page, unsigned long offset);
static int afs_file_releasepage(struct page *page, gfp_t gfp_flags);

struct inode_operations afs_file_inode_operations = {
	.getattr	= afs_inode_getattr,
};

struct address_space_operations afs_fs_aops = {
	.readpage	= afs_file_readpage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.releasepage	= afs_file_releasepage,
	.invalidatepage	= afs_file_invalidatepage,
};

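/*
 * Note: these operation tables are not used directly in this file; the AFS
 * inode setup code elsewhere in the client is expected to install them on
 * regular file inodes, roughly along the lines of:
 *
 *	inode->i_op = &afs_file_inode_operations;
 *	inode->i_mapping->a_ops = &afs_fs_aops;
 *
 * (the exact call site is an assumption and is not shown in this file).
 */
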
/*****************************************************************************/
/*
 * deal with notification that a page was read from the cache
 */
#ifdef AFS_CACHING_SUPPORT
static void afs_file_readpage_read_complete(void *cookie_data,
					    struct page *page,
					    void *data,
					    int error)
{
	_enter("%p,%p,%p,%d", cookie_data, page, data, error);

	if (error)
		SetPageError(page);
	else
		SetPageUptodate(page);
	unlock_page(page);

} /* end afs_file_readpage_read_complete() */
#endif

/*****************************************************************************/
/*
 * deal with notification that a page was written to the cache
 */
#ifdef AFS_CACHING_SUPPORT
static void afs_file_readpage_write_complete(void *cookie_data,
					     struct page *page,
					     void *data,
					     int error)
{
	_enter("%p,%p,%p,%d", cookie_data, page, data, error);

	unlock_page(page);

} /* end afs_file_readpage_write_complete() */
#endif

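/*
 * Note: unlike the read-completion handler above, the write-completion
 * handler neither marks the page up to date nor flags an error: by the time
 * the page is written back to the cache it already holds valid data fetched
 * from the server, so a failed cache store only costs us the cached copy.
 */
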
/*****************************************************************************/
/*
 * AFS read page from file (or symlink)
 */
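/*
 * Note: ->readpage() is called with the page locked; the page must be
 * unlocked again once the read has completed or failed, either directly
 * below or from one of the cache I/O completion handlers above, after being
 * marked up to date or in error.
 */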
static int afs_file_readpage(struct file *file, struct page *page)
{
	struct afs_rxfs_fetch_descriptor desc;
#ifdef AFS_CACHING_SUPPORT
	struct cachefs_page *pageio;
#endif
	struct afs_vnode *vnode;
	struct inode *inode;
	int ret;

	inode = page->mapping->host;

	_enter("{%lu},{%lu}", inode->i_ino, page->index);

	vnode = AFS_FS_I(inode);

	BUG_ON(!PageLocked(page));

	ret = -ESTALE;
	if (vnode->flags & AFS_VNODE_DELETED)
		goto error;

#ifdef AFS_CACHING_SUPPORT
	ret = cachefs_page_get_private(page, &pageio, GFP_NOIO);
	if (ret < 0)
		goto error;

	/* is it cached? */
	ret = cachefs_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif

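	/* with no caching support compiled in, ret is always -ENOBUFS at
	 * this point, so the switch below always drops through to fetching
	 * the page from the server */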
	switch (ret) {
		/* read BIO submitted and wb-journal entry found */
	case 1:
		BUG(); // TODO - handle wb-journal match

		/* read BIO submitted (page in cache); the read-completion
		 * handler will mark the page up to date (or in error) and
		 * unlock it */
	case 0:
		break;

		/* no page available in cache */
	case -ENOBUFS:
	case -ENODATA:
	default:
		desc.fid = vnode->fid;
		desc.offset = page->index << PAGE_CACHE_SHIFT;
		desc.size = min((size_t) (inode->i_size - desc.offset),
				(size_t) PAGE_SIZE);
		desc.buffer = kmap(page);

		clear_page(desc.buffer);

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_vnode_fetch_data(vnode, &desc);
		kunmap(page);
		if (ret < 0) {
			if (ret == -ENOENT) {
				_debug("got NOENT from server"
				       " - marking file deleted and stale");
				vnode->flags |= AFS_VNODE_DELETED;
				ret = -ESTALE;
			}
#ifdef AFS_CACHING_SUPPORT
			cachefs_uncache_page(vnode->cache, page);
#endif
			goto error;
		}

		SetPageUptodate(page);

#ifdef AFS_CACHING_SUPPORT
		if (cachefs_write_page(vnode->cache,
				       page,
				       afs_file_readpage_write_complete,
				       NULL,
				       GFP_KERNEL) != 0) {
			cachefs_uncache_page(vnode->cache, page);
			unlock_page(page);
		}
#else
		unlock_page(page);
#endif
	}

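	/* at this point the page has either been unlocked already, or it
	 * will be unlocked by one of the cache completion handlers once the
	 * cache I/O finishes */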
	_leave(" = 0");
	return 0;

 error:
	SetPageError(page);
	unlock_page(page);
	_leave(" = %d", ret);
	return ret;
} /* end afs_file_readpage() */

/*****************************************************************************/
/*
 * get a page cookie for the specified page
 */
#ifdef AFS_CACHING_SUPPORT
int afs_cache_get_page_cookie(struct page *page,
			      struct cachefs_page **_page_cookie)
{
	int ret;

	_enter("");

	ret = cachefs_page_get_private(page, _page_cookie, GFP_NOIO);

	_leave(" = %d", ret);
	return ret;
} /* end afs_cache_get_page_cookie() */
#endif

/*****************************************************************************/
/*
 * invalidate part or all of a page
 */
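/*
 * Note: ->invalidatepage() is called when some or all of a locked page is
 * about to be removed from the page cache (on truncation, for instance);
 * offset is the byte offset within the page at which invalidation starts,
 * so an offset of zero means the whole page is being invalidated.
 */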
static int afs_file_invalidatepage(struct page *page, unsigned long offset)
{
	int ret = 1;

	_enter("{%lu},%lu", page->index, offset);

	BUG_ON(!PageLocked(page));

	if (PagePrivate(page)) {
#ifdef AFS_CACHING_SUPPORT
		struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
		cachefs_uncache_page(vnode->cache, page);
#endif

		/* We release buffers only if the entire page is being
		 * invalidated.
		 * The get_block cached value has been unconditionally
		 * invalidated, so real IO is not possible anymore.
		 */
		if (offset == 0) {
			BUG_ON(!PageLocked(page));

			ret = 0;
			if (!PageWriteback(page))
				ret = page->mapping->a_ops->releasepage(page,
									0);
		}
	}

	_leave(" = %d", ret);
	return ret;
} /* end afs_file_invalidatepage() */

/*****************************************************************************/
/*
 * release a page and clean up its private data
 */
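/*
 * Note: ->releasepage() is called when the VM wants to detach the
 * filesystem's private data from a page so that the page can be freed;
 * gfp_flags describes the allocation context of the caller.
 */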
static int afs_file_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct cachefs_page *pageio;

	_enter("{%lu},%x", page->index, gfp_flags);

	if (PagePrivate(page)) {
#ifdef AFS_CACHING_SUPPORT
		struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
		cachefs_uncache_page(vnode->cache, page);
#endif

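		/* detach and free the cache cookie attached to the page;
		 * page_private() and set_page_private() are the accessors
		 * for the page->private field */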
		pageio = (struct cachefs_page *) page_private(page);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kfree(pageio);
	}

	_leave(" = 0");
	return 0;
} /* end afs_file_releasepage() */