/*
 * linux/fs/mbcache.c
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>

#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
		printk(KERN_DEBUG f); \
		printk("\n"); \
	} while (0)
# define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while (0)
#else
# define mb_debug(f...) do { } while (0)
# define mb_assert(c) do { } while (0)
#endif

#define mb_error(f...) do { \
		printk(KERN_ERR f); \
		printk("\n"); \
	} while (0)

#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)

static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif

struct mb_cache {
	struct list_head	c_cache_list;
	const char		*c_name;
	atomic_t		c_entry_count;
	int			c_max_entries;
	int			c_bucket_bits;
	struct kmem_cache	*c_entry_cache;
	struct list_head	*c_block_hash;
	struct list_head	*c_index_hash;
};

/*
 * Global data: list of all mbcache's, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */
static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);

/*
 * What the mbcache registers as to get shrunk dynamically.
 */
static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan,
			      gfp_t gfp_mask);

static struct shrinker mb_cache_shrinker = {
	.shrink = mb_cache_shrink_fn,
	.seeks = DEFAULT_SEEKS,
};

static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
	return !list_empty(&ce->e_block_list);
}

static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_hashed(ce)) {
		list_del_init(&ce->e_block_list);
		list_del(&ce->e_index.o_list);
	}
}

static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!(ce->e_used || ce->e_queued));
	kmem_cache_free(cache->c_entry_cache, ce);
	atomic_dec(&cache->c_entry_count);
}

static void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
	__releases(mb_cache_spinlock)
{
	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued)) {
		if (!__mb_cache_entry_is_hashed(ce))
			goto forget;
		mb_assert(list_empty(&ce->e_lru_list));
		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
	}
	spin_unlock(&mb_cache_spinlock);
	return;
forget:
	spin_unlock(&mb_cache_spinlock);
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}

/*
 * mb_cache_shrink_fn()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @shrink: (ignored)
 * @nr_to_scan: Number of objects to scan
 * @gfp_mask: (ignored)
 *
 * Returns the number of objects which are present in the cache.
 */
static int
mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	LIST_HEAD(free_list);
	struct mb_cache *cache;
	struct mb_cache_entry *entry, *tmp;
	int count = 0;

	mb_debug("trying to free %d entries", nr_to_scan);
	spin_lock(&mb_cache_spinlock);
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_move_tail(&ce->e_lru_list, &free_list);
		__mb_cache_entry_unhash(ce);
	}
	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
		mb_debug("cache %s (%d)", cache->c_name,
			 atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(entry, gfp_mask);
	}
	return (count / 100) * sysctl_vfs_cache_pressure;
}

/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are equal size. Cache entries may be from
 * multiple devices. If this is the first mbcache created, registers
 * the cache with kernel memory management. Returns NULL if no more
 * memory was available.
 *
 * @name: name of the cache (informal)
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, int bucket_bits)
{
	int n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->c_name = name;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n = 0; n < bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_block_hash[n]);
	cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_index_hash)
		goto fail;
	for (n = 0; n < bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_index_hash[n]);
	cache->c_entry_cache = kmem_cache_create(name,
		sizeof(struct mb_cache_entry), 0,
		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!cache->c_entry_cache)
		goto fail2;

	/*
	 * Set an upper limit on the number of cache entries so that the hash
	 * chains won't grow too long.
	 */
	cache->c_max_entries = bucket_count << 4;

	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail2:
	kfree(cache->c_index_hash);

fail:
	kfree(cache->c_block_hash);
	kfree(cache);
	return NULL;
}

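/*
 * A minimal usage sketch of cache setup and teardown, assuming a hypothetical
 * caller (for example a filesystem module's init/exit path) that is not part
 * of this file; the cache name "foo_xattr" and the error handling style are
 * illustrative only:
 *
 *	struct mb_cache *cache;
 *
 *	cache = mb_cache_create("foo_xattr", 6);	(2^6 = 64 hash buckets)
 *	if (!cache)
 *		return -ENOMEM;
 *	...
 *	mb_cache_destroy(cache);
 */
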
/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. All cache entries
 * currently in use cannot be freed, and thus remain in the cache. All others
 * are freed.
 *
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}
}

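/*
 * A usage sketch, assuming a hypothetical filesystem that caches blocks of
 * its backing device and drops them when the filesystem goes away; "sb" is
 * a struct super_block that is not defined in this file:
 *
 *	mb_cache_shrink(sb->s_bdev);
 */
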
/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it. If this was the last mbcache, un-registers the
 * mbcache from kernel memory management.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_cache == cache) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}
	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			 cache->c_name,
			 atomic_read(&cache->c_entry_count));
	}
	kmem_cache_destroy(cache->c_entry_cache);
	kfree(cache->c_index_hash);
	kfree(cache->c_block_hash);
	kfree(cache);
}

/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
	struct mb_cache_entry *ce = NULL;

	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
		spin_lock(&mb_cache_spinlock);
		if (!list_empty(&mb_cache_lru_list)) {
			ce = list_entry(mb_cache_lru_list.next,
					struct mb_cache_entry, e_lru_list);
			list_del_init(&ce->e_lru_list);
			__mb_cache_entry_unhash(ce);
		}
		spin_unlock(&mb_cache_spinlock);
	}
	if (!ce) {
		ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
		if (!ce)
			return NULL;
		atomic_inc(&cache->c_entry_count);
		INIT_LIST_HEAD(&ce->e_lru_list);
		INIT_LIST_HEAD(&ce->e_block_list);
		ce->e_cache = cache;
		ce->e_queued = 0;
	}
	ce->e_used = 1 + MB_CACHE_WRITER;
	return ce;
}

/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block exists
 * already (this may happen after a failed lookup, but when another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @key: lookup key
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int key)
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct list_head *l;
	int error = -EBUSY;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block)
			goto out;
	}
	__mb_cache_entry_unhash(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
	ce->e_index.o_key = key;
	bucket = hash_long(key, cache->c_bucket_bits);
	list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
	error = 0;
out:
	spin_unlock(&mb_cache_spinlock);
	return error;
}

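/*
 * A sketch of the allocate/fill/insert/release cycle, assuming a
 * hypothetical caller that already has a "cache", a block device "bdev",
 * a block number "block" and a hash "key" over the block's contents
 * (none of these identifiers are defined in this file):
 *
 *	struct mb_cache_entry *ce;
 *	int error;
 *
 *	ce = mb_cache_entry_alloc(cache, GFP_NOFS);
 *	if (ce) {
 *		error = mb_cache_entry_insert(ce, bdev, block, key);
 *		(-EBUSY here means another process cached the block first)
 *		mb_cache_entry_release(ce);
 *	}
 */
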
/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	__mb_cache_entry_release_unlock(ce);
}

/*
 * mb_cache_entry_free()
 *
 * This is equivalent to the sequence mb_cache_entry_takeout() --
 * mb_cache_entry_release().
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_unhash(ce);
	__mb_cache_entry_release_unlock(ce);
}

/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists. The returned cache entry is locked for exclusive access ("single
 * writer").
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct list_head *l;
	struct mb_cache_entry *ce;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &cache->c_block_hash[bucket]) {
		ce = list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			while (ce->e_used > 0) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);
			ce->e_used += 1 + MB_CACHE_WRITER;

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				return NULL;
			}
			goto cleanup;
		}
	}
	ce = NULL;

cleanup:
	spin_unlock(&mb_cache_spinlock);
	return ce;
}

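/*
 * A usage sketch, assuming a hypothetical caller that needs exclusive
 * access to the entry for (bdev, block), e.g. because the on-disk block is
 * about to be modified; "cache", "bdev" and "block" are illustrative only:
 *
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_get(cache, bdev, block);
 *	if (ce) {
 *		... update the entry to match the new block contents ...
 *		mb_cache_entry_release(ce);
 *	}
 *
 * If the block no longer holds cacheable data, the handle would instead be
 * dropped with mb_cache_entry_free(ce), which also unhashes the entry.
 */
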
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
		      struct block_device *bdev, unsigned int key)
{
	while (l != head) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_index.o_list);
		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			/* Incrementing before holding the lock gives readers
			   priority over writers. */
			ce->e_used++;
			while (ce->e_used >= MB_CACHE_WRITER) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				spin_lock(&mb_cache_spinlock);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	return NULL;
}

/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found. The
 * returned cache entry is locked for shared access ("multiple readers").
 *
 * @cache: the cache to search
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
			  unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	spin_lock(&mb_cache_spinlock);
	l = cache->c_index_hash[bucket].next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	spin_unlock(&mb_cache_spinlock);
	return ce;
}

/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 *	...
 *	entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	spin_lock(&mb_cache_spinlock);
	l = prev->e_index.o_list.next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	__mb_cache_entry_release_unlock(prev);
	return ce;
}

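/*
 * A fuller version of the loop above, assuming a hypothetical predicate
 * candidate_matches() and a caller with "cache", "bdev" and "key" in scope;
 * note that the find functions can also return an ERR_PTR value (-EAGAIN):
 *
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_find_first(cache, bdev, key);
 *	while (ce && !IS_ERR(ce)) {
 *		if (candidate_matches(ce))
 *			break;
 *		ce = mb_cache_entry_find_next(ce, bdev, key);
 *	}
 *	if (ce && !IS_ERR(ce))
 *		mb_cache_entry_release(ce);
 */
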
#endif	/* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */

static int __init init_mbcache(void)
{
	register_shrinker(&mb_cache_shrinker);
	return 0;
}

static void __exit exit_mbcache(void)
{
	unregister_shrinker(&mb_cache_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)