/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * uptodate.c
 *
 * Tracking the up-to-date-ness of a local buffer_head with respect to
 * the cluster.
 *
 * Copyright (C) 2002, 2004, 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 * Standard buffer head caching flags (uptodate, etc) are insufficient
 * in a clustered environment - a buffer may be marked up to date on
 * our local node but could have been modified by another cluster
 * member. As a result an additional (and performant) caching scheme
 * is required. A further requirement is that we consume as little
 * memory as possible - we never pin buffer_head structures in order
 * to cache them.
 *
 * We track the existence of up to date buffers on the inodes which
 * are associated with them. Because we don't want to pin
 * buffer_heads, this is only a (strong) hint and several other checks
 * are made in the I/O path to ensure that we don't use a stale or
 * invalid buffer without going to disk:
 *	- buffer_jbd is used liberally - if a bh is in the journal on
 *	  this node then it *must* be up to date.
 *	- the standard buffer_uptodate() macro is used to detect buffers
 *	  which may be invalid (even if we have an up to date tracking
 *	  item for them)
 *
 * For a full understanding of how this code works together, one
 * should read the callers in dlmglue.c, the I/O functions in
 * buffer_head_io.c and ocfs2_journal_access in journal.c.
 */
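
/*
 * Illustrative sketch only (not part of the original file): how a caller
 * in the I/O path is expected to combine these helpers with an actual
 * disk read.  The real logic lives in ocfs2_read_blocks() in
 * buffer_head_io.c; the function name below is hypothetical and the
 * block submission/error handling is elided.
 */
#if 0	/* example only - not compiled */
static int ocfs2_read_block_example(struct ocfs2_caching_info *ci,
				    struct buffer_head *bh)
{
	int status = 0;

	/* Serialize with other readers and readahead on this cache. */
	ocfs2_metadata_cache_io_lock(ci);

	if (!ocfs2_buffer_uptodate(ci, bh)) {
		/* Not trusted locally - go to disk.  Submission and
		 * completion handling elided. */

		/* On success, record that this node now holds an up to
		 * date copy of the block. */
		ocfs2_set_buffer_uptodate(ci, bh);
	}

	ocfs2_metadata_cache_io_unlock(ci);

	return status;
}
#endif
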
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/buffer_head.h>
#include <linux/rbtree.h>

#include <cluster/masklog.h>

#include "ocfs2.h"
#include "inode.h"
#include "uptodate.h"
#include "ocfs2_trace.h"

struct ocfs2_meta_cache_item {
	struct rb_node	c_node;
	sector_t	c_block;
};

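/*
 * For orientation only (paraphrased sketch, not the authoritative
 * definition): the per-object cache state manipulated throughout this
 * file lives in struct ocfs2_caching_info, declared in ocfs2.h.  The
 * fields used here look roughly like this - a small inline array of
 * block numbers that is promoted to an rbtree of ocfs2_meta_cache_item
 * once it overflows:
 */
#if 0	/* example only - see ocfs2.h for the real declaration */
struct ocfs2_caching_info {
	const struct ocfs2_caching_operations *ci_ops;	/* owner/locking callbacks */
	unsigned long		ci_created_trans;	/* journal bookkeeping */
	unsigned long		ci_last_trans;
	unsigned int		ci_flags;	/* OCFS2_CACHE_FL_INLINE, ... */
	unsigned int		ci_num_cached;
	union {
		sector_t	ci_array[OCFS2_CACHE_INFO_MAX_ARRAY];
		struct rb_root	ci_tree;
	} ci_cache;
};
#endif
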
static struct kmem_cache *ocfs2_uptodate_cachep;

u64 ocfs2_metadata_cache_owner(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);

	return ci->ci_ops->co_owner(ci);
}

struct super_block *ocfs2_metadata_cache_get_super(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);

	return ci->ci_ops->co_get_super(ci);
}

static void ocfs2_metadata_cache_lock(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);

	ci->ci_ops->co_cache_lock(ci);
}

static void ocfs2_metadata_cache_unlock(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);

	ci->ci_ops->co_cache_unlock(ci);
}

void ocfs2_metadata_cache_io_lock(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);

	ci->ci_ops->co_io_lock(ci);
}

void ocfs2_metadata_cache_io_unlock(struct ocfs2_caching_info *ci)
{
	BUG_ON(!ci || !ci->ci_ops);

	ci->ci_ops->co_io_unlock(ci);
}

static void ocfs2_metadata_cache_reset(struct ocfs2_caching_info *ci,
				       int clear)
{
	ci->ci_flags |= OCFS2_CACHE_FL_INLINE;
	ci->ci_num_cached = 0;

	if (clear) {
		ci->ci_created_trans = 0;
		ci->ci_last_trans = 0;
	}
}

void ocfs2_metadata_cache_init(struct ocfs2_caching_info *ci,
			       const struct ocfs2_caching_operations *ops)
{
	BUG_ON(!ops);
	ci->ci_ops = ops;
	ocfs2_metadata_cache_reset(ci, 1);
}

void ocfs2_metadata_cache_exit(struct ocfs2_caching_info *ci)
{
	ocfs2_metadata_cache_purge(ci);
	ocfs2_metadata_cache_reset(ci, 1);
}

/* No lock taken here as 'root' is not expected to be visible to other
 * processes. */
static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root)
{
	unsigned int purged = 0;
	struct rb_node *node;
	struct ocfs2_meta_cache_item *item;

	while ((node = rb_last(root)) != NULL) {
		item = rb_entry(node, struct ocfs2_meta_cache_item, c_node);

		trace_ocfs2_purge_copied_metadata_tree(
					(unsigned long long) item->c_block);

		rb_erase(&item->c_node, root);
		kmem_cache_free(ocfs2_uptodate_cachep, item);

		purged++;
	}
	return purged;
}

/* Called from locking and called from ocfs2_clear_inode. Dump the
 * cache for a given inode.
 *
 * This function is a few more lines longer than necessary due to some
 * accounting done here, but I think it's worth tracking down those
 * bugs sooner -- Mark */
void ocfs2_metadata_cache_purge(struct ocfs2_caching_info *ci)
{
	unsigned int tree, to_purge, purged;
	struct rb_root root = RB_ROOT;

	BUG_ON(!ci || !ci->ci_ops);

	ocfs2_metadata_cache_lock(ci);
	tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE);
	to_purge = ci->ci_num_cached;

	trace_ocfs2_metadata_cache_purge(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		to_purge, tree);

	/* If we're a tree, save off the root so that we can safely
	 * initialize the cache. We do the work to free tree members
	 * without the spinlock. */
	if (tree)
		root = ci->ci_cache.ci_tree;

	ocfs2_metadata_cache_reset(ci, 0);
	ocfs2_metadata_cache_unlock(ci);

	purged = ocfs2_purge_copied_metadata_tree(&root);

	/* If possible, track the number wiped so that we can more
	 * easily detect counting errors. Unfortunately, this is only
	 * meaningful for trees. */
	if (tree && purged != to_purge)
		mlog(ML_ERROR, "Owner %llu, count = %u, purged = %u\n",
		     (unsigned long long)ocfs2_metadata_cache_owner(ci),
		     to_purge, purged);
}

/* Returns the index in the cache array, -1 if not found.
 * Requires ip_lock. */
static int ocfs2_search_cache_array(struct ocfs2_caching_info *ci,
				    sector_t item)
{
	int i;

	for (i = 0; i < ci->ci_num_cached; i++) {
		if (item == ci->ci_cache.ci_array[i])
			return i;
	}

	return -1;
}

/* Returns the cache item if found, otherwise NULL.
 * Requires ip_lock. */
static struct ocfs2_meta_cache_item *
ocfs2_search_cache_tree(struct ocfs2_caching_info *ci,
			sector_t block)
{
	struct rb_node *n = ci->ci_cache.ci_tree.rb_node;
	struct ocfs2_meta_cache_item *item = NULL;

	while (n) {
		item = rb_entry(n, struct ocfs2_meta_cache_item, c_node);

		if (block < item->c_block)
			n = n->rb_left;
		else if (block > item->c_block)
			n = n->rb_right;
		else
			return item;
	}

	return NULL;
}

static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
			       struct buffer_head *bh)
{
	int index = -1;
	struct ocfs2_meta_cache_item *item = NULL;

	ocfs2_metadata_cache_lock(ci);

	trace_ocfs2_buffer_cached_begin(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long) bh->b_blocknr,
		!!(ci->ci_flags & OCFS2_CACHE_FL_INLINE));

	if (ci->ci_flags & OCFS2_CACHE_FL_INLINE)
		index = ocfs2_search_cache_array(ci, bh->b_blocknr);
	else
		item = ocfs2_search_cache_tree(ci, bh->b_blocknr);

	ocfs2_metadata_cache_unlock(ci);

	trace_ocfs2_buffer_cached_end(index, item);

	return (index != -1) || (item != NULL);
}

/* Warning: even if it returns true, this does *not* guarantee that
 * the block is stored in our inode metadata cache.
 *
 * This can be called under lock_buffer().
 */
int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
			  struct buffer_head *bh)
{
	/* Doesn't matter if the bh is in our cache or not -- if it's
	 * not marked uptodate then we know it can't have correct
	 * data. */
	if (!buffer_uptodate(bh))
		return 0;

	/* OCFS2 does not allow multiple nodes to be changing the same
	 * block at the same time. */
	if (buffer_jbd(bh))
		return 1;

	/* Ok, locally the buffer is marked as up to date, now search
	 * our cache to see if we can trust that. */
	return ocfs2_buffer_cached(ci, bh);
}

/*
 * Determine whether a buffer is currently out on a read-ahead request.
 * ci_io_sem should be held to serialize submitters with the logic here.
 */
int ocfs2_buffer_read_ahead(struct ocfs2_caching_info *ci,
			    struct buffer_head *bh)
{
	return buffer_locked(bh) && ocfs2_buffer_cached(ci, bh);
}

/* Requires ip_lock */
static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci,
				     sector_t block)
{
	BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY);

	trace_ocfs2_append_cache_array(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)block, ci->ci_num_cached);

	ci->ci_cache.ci_array[ci->ci_num_cached] = block;
	ci->ci_num_cached++;
}

/* By now the caller should have checked that the item does *not*
 * exist in the tree.
 * Requires ip_lock. */
static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci,
				      struct ocfs2_meta_cache_item *new)
{
	sector_t block = new->c_block;
	struct rb_node *parent = NULL;
	struct rb_node **p = &ci->ci_cache.ci_tree.rb_node;
	struct ocfs2_meta_cache_item *tmp;

	trace_ocfs2_insert_cache_tree(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)block, ci->ci_num_cached);

	while (*p) {
		parent = *p;

		tmp = rb_entry(parent, struct ocfs2_meta_cache_item, c_node);

		if (block < tmp->c_block)
			p = &(*p)->rb_left;
		else if (block > tmp->c_block)
			p = &(*p)->rb_right;
		else {
			/* This should never happen! */
			mlog(ML_ERROR, "Duplicate block %llu cached!\n",
			     (unsigned long long) block);
			BUG();
		}
	}

	rb_link_node(&new->c_node, parent, p);
	rb_insert_color(&new->c_node, &ci->ci_cache.ci_tree);
	ci->ci_num_cached++;
}

/* co_cache_lock() must be held */
static inline int ocfs2_insert_can_use_array(struct ocfs2_caching_info *ci)
{
	return (ci->ci_flags & OCFS2_CACHE_FL_INLINE) &&
		(ci->ci_num_cached < OCFS2_CACHE_INFO_MAX_ARRAY);
}

/* tree should be exactly OCFS2_CACHE_INFO_MAX_ARRAY wide. NULL the
 * pointers in tree after we use them - this allows caller to detect
 * when to free in case of error.
 *
 * The co_cache_lock() must be held. */
static void ocfs2_expand_cache(struct ocfs2_caching_info *ci,
			       struct ocfs2_meta_cache_item **tree)
{
	int i;

	mlog_bug_on_msg(ci->ci_num_cached != OCFS2_CACHE_INFO_MAX_ARRAY,
			"Owner %llu, num cached = %u, should be %u\n",
			(unsigned long long)ocfs2_metadata_cache_owner(ci),
			ci->ci_num_cached, OCFS2_CACHE_INFO_MAX_ARRAY);
	mlog_bug_on_msg(!(ci->ci_flags & OCFS2_CACHE_FL_INLINE),
			"Owner %llu not marked as inline anymore!\n",
			(unsigned long long)ocfs2_metadata_cache_owner(ci));

	/* Be careful to initialize the tree members *first* because
	 * once the ci_tree is used, the array is junk... */
	for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++)
		tree[i]->c_block = ci->ci_cache.ci_array[i];

	ci->ci_flags &= ~OCFS2_CACHE_FL_INLINE;
	ci->ci_cache.ci_tree = RB_ROOT;
	/* this will be set again by __ocfs2_insert_cache_tree */
	ci->ci_num_cached = 0;

	for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) {
		__ocfs2_insert_cache_tree(ci, tree[i]);
		tree[i] = NULL;
	}

	trace_ocfs2_expand_cache(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		ci->ci_flags, ci->ci_num_cached);
}

/* Slow path function - memory allocation is necessary. See the
 * comment above ocfs2_set_buffer_uptodate for more information. */
static void __ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
					sector_t block,
					int expand_tree)
{
	int i;
	struct ocfs2_meta_cache_item *new = NULL;
	struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] =
		{ NULL, };

	trace_ocfs2_set_buffer_uptodate(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)block, expand_tree);

	new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
	if (!new) {
		mlog_errno(-ENOMEM);
		return;
	}
	new->c_block = block;

	if (expand_tree) {
		/* Do *not* allocate an array here - the removal code
		 * has no way of tracking that. */
		for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) {
			tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
						   GFP_NOFS);
			if (!tree[i]) {
				mlog_errno(-ENOMEM);
				goto out_free;
			}

			/* These are initialized in ocfs2_expand_cache! */
		}
	}

	ocfs2_metadata_cache_lock(ci);
	if (ocfs2_insert_can_use_array(ci)) {
		/* Ok, items were removed from the cache in between
		 * locks. Detect this and revert back to the fast path */
		ocfs2_append_cache_array(ci, block);
		ocfs2_metadata_cache_unlock(ci);
		goto out_free;
	}

	if (expand_tree)
		ocfs2_expand_cache(ci, tree);

	__ocfs2_insert_cache_tree(ci, new);
	ocfs2_metadata_cache_unlock(ci);

	new = NULL;
out_free:
	if (new)
		kmem_cache_free(ocfs2_uptodate_cachep, new);

	/* If these were used, then ocfs2_expand_cache re-set them to
	 * NULL for us. */
	if (tree[0]) {
		for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++)
			if (tree[i])
				kmem_cache_free(ocfs2_uptodate_cachep,
						tree[i]);
	}
}

/* Item insertion is guarded by co_io_lock(), so the insertion path takes
 * advantage of this by not rechecking for a duplicate insert during
 * the slow case. Additionally, if the cache needs to be bumped up to
 * a tree, the code will not recheck after acquiring the lock --
 * multiple paths cannot be expanding to a tree at the same time.
 *
 * The slow path takes into account that items can be removed
 * (including the whole tree wiped and reset) while this process is out
 * allocating memory. In those cases, it reverts back to the fast
 * path.
 *
 * Note that this function may actually fail to insert the block if
 * memory cannot be allocated. This is not fatal however (but may
 * result in a performance penalty).
 *
 * Readahead buffers can be passed in here before the I/O request is
 * completed.
 */
void ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci,
			       struct buffer_head *bh)
{
	int expand;

	/* The block may very well exist in our cache already, so avoid
	 * doing any more work in that case. */
	if (ocfs2_buffer_cached(ci, bh))
		return;

	trace_ocfs2_set_buffer_uptodate_begin(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)bh->b_blocknr);

	/* No need to recheck under spinlock - insertion is guarded by
	 * co_io_lock() */
	ocfs2_metadata_cache_lock(ci);
	if (ocfs2_insert_can_use_array(ci)) {
		/* Fast case - it's an array and there's a free
		 * spot. */
		ocfs2_append_cache_array(ci, bh->b_blocknr);
		ocfs2_metadata_cache_unlock(ci);
		return;
	}

	expand = 0;
	if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
		/* We need to bump things up to a tree. */
		expand = 1;
	}
	ocfs2_metadata_cache_unlock(ci);

	__ocfs2_set_buffer_uptodate(ci, bh->b_blocknr, expand);
}

/* Called against a newly allocated buffer. Most likely nobody should
 * be able to read this sort of metadata while it's still being
 * allocated, but this is careful to take co_io_lock() anyway. */
void ocfs2_set_new_buffer_uptodate(struct ocfs2_caching_info *ci,
				   struct buffer_head *bh)
{
	/* This should definitely *not* exist in our cache */
	BUG_ON(ocfs2_buffer_cached(ci, bh));

	set_buffer_uptodate(bh);

	ocfs2_metadata_cache_io_lock(ci);
	ocfs2_set_buffer_uptodate(ci, bh);
	ocfs2_metadata_cache_io_unlock(ci);
}

/* Requires ip_lock. */
static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci,
					int index)
{
	sector_t *array = ci->ci_cache.ci_array;
	int bytes;

	BUG_ON(index < 0 || index >= OCFS2_CACHE_INFO_MAX_ARRAY);
	BUG_ON(index >= ci->ci_num_cached);
	BUG_ON(!ci->ci_num_cached);

	trace_ocfs2_remove_metadata_array(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		index, ci->ci_num_cached);

	ci->ci_num_cached--;

	/* don't need to copy if the array is now empty, or if we
	 * removed at the tail */
	if (ci->ci_num_cached && index < ci->ci_num_cached) {
		bytes = sizeof(sector_t) * (ci->ci_num_cached - index);
		memmove(&array[index], &array[index + 1], bytes);
	}
}

/* Requires ip_lock. */
static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci,
				       struct ocfs2_meta_cache_item *item)
{
	trace_ocfs2_remove_metadata_tree(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)item->c_block);

	rb_erase(&item->c_node, &ci->ci_cache.ci_tree);
	ci->ci_num_cached--;
}

static void ocfs2_remove_block_from_cache(struct ocfs2_caching_info *ci,
					  sector_t block)
{
	int index;
	struct ocfs2_meta_cache_item *item = NULL;

	ocfs2_metadata_cache_lock(ci);
	trace_ocfs2_remove_block_from_cache(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long) block, ci->ci_num_cached,
		ci->ci_flags);

	if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) {
		index = ocfs2_search_cache_array(ci, block);
		if (index != -1)
			ocfs2_remove_metadata_array(ci, index);
	} else {
		item = ocfs2_search_cache_tree(ci, block);
		if (item)
			ocfs2_remove_metadata_tree(ci, item);
	}
	ocfs2_metadata_cache_unlock(ci);

	if (item)
		kmem_cache_free(ocfs2_uptodate_cachep, item);
}

/*
 * Called when we remove a chunk of metadata from an inode. We don't
 * bother reverting things to an inlined array in the case of a remove
 * which moves us back under the limit.
 */
void ocfs2_remove_from_cache(struct ocfs2_caching_info *ci,
			     struct buffer_head *bh)
{
	sector_t block = bh->b_blocknr;

	ocfs2_remove_block_from_cache(ci, block);
}

/* Called when we remove xattr clusters from an inode. */
void ocfs2_remove_xattr_clusters_from_cache(struct ocfs2_caching_info *ci,
					    sector_t block,
					    u32 c_len)
{
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	unsigned int i, b_len = ocfs2_clusters_to_blocks(sb, 1) * c_len;

	for (i = 0; i < b_len; i++, block++)
		ocfs2_remove_block_from_cache(ci, block);
}

int __init init_ocfs2_uptodate_cache(void)
{
	ocfs2_uptodate_cachep = kmem_cache_create("ocfs2_uptodate",
				  sizeof(struct ocfs2_meta_cache_item),
				  0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ocfs2_uptodate_cachep)
		return -ENOMEM;

	return 0;
}

void exit_ocfs2_uptodate_cache(void)
{
	kmem_cache_destroy(ocfs2_uptodate_cachep);
}