/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

struct gfs2_gl_hash_bucket {
	struct hlist_head hb_list;
};

struct glock_iter {
	int hash;                     /* hash bucket index           */
	struct gfs2_sbd *sdp;         /* incore superblock           */
	struct gfs2_glock *gl;        /* current glock struct        */
	struct hlist_head *hb_list;   /* current hash bucket ptr     */
	struct seq_file *seq;         /* sequence file for debugfs   */
	char string[512];             /* scratch space               */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ - 1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif
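
/*
 * For example, with GL_HASH_LOCK_SZ == 256, hash chains 0, 256, 512, ...
 * all share gl_hash_locks[0], since gl_lock_addr() masks the bucket number
 * with GL_HASH_LOCK_SZ - 1.  The rwlock is taken for read while walking a
 * chain and for write when adding a glock to or removing one from it.
 */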

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
				   int flags)
{
	if (actual == requested)
		return 1;

	if (flags & GL_EXACT)
		return 0;

	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;

	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;

	return 0;
}
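
/*
 * For example, a holder asking for LM_ST_SHARED while the glock is already
 * held in LM_ST_EXCLUSIVE is satisfied without a state change (unless it
 * passed GL_EXACT), and LM_FLAG_ANY accepts any state except LM_ST_UNLOCKED.
 */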

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	gfs2_lm_put_lock(sdp, gl->gl_lock);

	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	write_lock(gl_lock_addr(gl->gl_hash));
	if (atomic_dec_and_test(&gl->gl_ref)) {
		hlist_del(&gl->gl_list);
		write_unlock(gl_lock_addr(gl->gl_hash));
		BUG_ON(spin_is_locked(&gl->gl_spin));
		gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
		gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
		gfs2_assert(sdp, list_empty(&gl->gl_holders));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the index of the hash bucket to search
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;

	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		atomic_inc(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
					  const struct lm_lockname *name)
{
	unsigned int hash = gl_hash(sdp, name);
	struct gfs2_glock *gl;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, name);
	read_unlock(gl_lock_addr(hash));

	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	int error;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_hash = hash;
	gl->gl_owner_pid = 0;
	gl->gl_ip = 0;
	gl->gl_ops = glops;
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	gl->gl_vn = 0;
	gl->gl_stamp = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	gl->gl_aspace = NULL;
	lops_init_le(&gl->gl_le, &gfs2_glock_lops);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;
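
	/*
	 * The hash chain was only read-locked during the first search, so
	 * another CPU may have created and inserted the same glock while we
	 * were setting up ours.  Re-search under the write lock and, if we
	 * lost the race, free our copy and use the existing glock instead
	 * (search_bucket() has already taken a reference on it for us).
	 */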
	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;

fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);
fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = current->pid;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int holder_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
}
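
/*
 * HIF_WAIT protocol: whoever queues a holder sets HIF_WAIT and sleeps in
 * wait_on_holder() until the bit is cleared.  gfs2_holder_wake() clears the
 * bit and issues a memory barrier before wake_up_bit() so the waiter cannot
 * miss the wakeup.
 */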

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/*  gh->gh_error never examined.  */
	set_bit(GLF_LOCK, &gl->gl_flags);
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);

	return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);

			if (atomic_read(&sdp->sd_reclaim_count) >
			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
				gfs2_reclaim_glock(sdp);
				gfs2_reclaim_glock(sdp);
			}

			gfs2_glock_xmote_th(gh->gh_gl, gh);
			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
	} else {
		struct gfs2_holder *next_gh;
		if (gh->gh_state == LM_ST_EXCLUSIVE)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_state == LM_ST_EXCLUSIVE)
			return 1;
	}

	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	gfs2_holder_wake(gh);

	return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gl: the glock
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_glock *gl)
{
	if (!list_empty(&gl->gl_holders))
		return 1;

	if (gl->gl_state == gl->gl_demote_state ||
	    gl->gl_state == LM_ST_UNLOCKED) {
		clear_bit(GLF_DEMOTE, &gl->gl_flags);
		return 0;
	}
	set_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);
	if (gl->gl_demote_state == LM_ST_UNLOCKED ||
	    gl->gl_state != LM_ST_EXCLUSIVE)
		gfs2_glock_drop_th(gl);
	else
		gfs2_glock_xmote_th(gl, NULL);
	spin_lock(&gl->gl_spin);

	return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */
static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
				blocked = rq_mutex(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
			blocked = rq_demote(gl);
		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
				blocked = rq_promote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else
			break;

		if (blocked)
			break;
	}
}
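
/*
 * Queue priorities, as implemented above: glmutex requests (gl_waiters1) are
 * served first, then any pending demote request, and only then promote
 * requests (gl_waiters3).  The loop stops as soon as a request blocks or
 * GLF_LOCK is already held.
 */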

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, 0, 0, &gh);
	set_bit(HIF_MUTEX, &gh.gh_iflags);
	if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
		BUG();

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
	} else {
		gl->gl_owner_pid = current->pid;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
		clear_bit(HIF_WAIT, &gh.gh_iflags);
		smp_mb();
		wake_up_bit(&gh.gh_iflags, HIF_WAIT);
	}
	spin_unlock(&gl->gl_spin);

	wait_on_holder(&gh);
	gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
	int acquired = 1;

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		acquired = 0;
	} else {
		gl->gl_owner_pid = current->pid;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
	}
	spin_unlock(&gl->gl_spin);

	return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	gl->gl_owner_pid = 0;
	gl->gl_ip = 0;
	run_queue(gl);
	BUG_ON(!spin_is_locked(&gl->gl_spin));
	spin_unlock(&gl->gl_spin);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practise: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED) {
		gl->gl_demote_state = state;
	}
	spin_unlock(&gl->gl_spin);
}
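
/*
 * Note the merging rule above: once a demote to LM_ST_UNLOCKED is pending it
 * is never weakened by a later request; otherwise the most recent requested
 * state wins.
 */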

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
}
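
/*
 * The hold/put pair above keeps an extra reference on the glock for as long
 * as it is in any locked state, so a glock cannot be freed while the lock
 * module still holds it in something other than LM_ST_UNLOCKED.
 */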

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, 0);
	}

	/*  Deal with each possible exit condition  */

	if (!gh) {
		gl->gl_stamp = jiffies;
		if (ret & LM_OUT_CANCELED)
			op_done = 0;
		else
			clear_bit(GLF_DEMOTE, &gl->gl_flags);
	} else {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
			goto out;
		gh->gh_error = GLR_CANCELED;
		if (ret & LM_OUT_CANCELED)
			goto out;
		if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
			list_add_tail(&gh->gh_list, &gl->gl_holders);
			gh->gh_error = 0;
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			set_bit(HIF_FIRST, &gh->gh_iflags);
			op_done = 0;
			goto out;
		}
		gh->gh_error = GLR_TRYFAILED;
		if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			goto out;
		gh->gh_error = -EINVAL;
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
out:
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	gfs2_glock_put(gl);

	if (gh)
		gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @gh: The holder making the request, or NULL when servicing a demote
 *
 */

static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int flags = gh ? gh->gh_flags : 0;
	unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Wakes up the process waiting on the struct gfs2_holder (if any) and drops
 * the reference on the glock that gfs2_glock_drop_th() took out.
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);
	clear_bit(GLF_DEMOTE, &gl->gl_flags);

	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	spin_lock(&gl->gl_spin);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);

	if (gh)
		gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	if (glops->go_drop_th)
		glops->go_drop_th(gl);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh && !(gl->gl_req_gh &&
				       (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_on_holder(gh);
	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, pid_t pid)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner_pid == pid)
			return gh;
	}

	return NULL;
}

static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (gi) {
		vsprintf(gi->string, fmt, args);
		seq_printf(gi->seq, gi->string);
	} else {
		vprintk(fmt, args);
	}
	va_end(args);
}
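
/*
 * print_dbg() writes into the seq_file when called from the debugfs glock
 * iterator (gi != NULL) and falls back to vprintk() otherwise, so the same
 * dump routines serve both the debugfs file and the console lock-state dump.
 */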

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(!gh->gh_owner_pid);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner_pid);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       gl->gl_name.ln_type, gl->gl_state);
		BUG();
	}

	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner_pid);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		BUG();
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

restart:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	set_bit(HIF_PROMOTE, &gh->gh_iflags);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		if (gh->gh_error == GLR_CANCELED) {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		spin_unlock(&gl->gl_spin);

		if (glops->go_unlock)
			glops->go_unlock(gh);

		spin_lock(&gl->gl_spin);
		gl->gl_stamp = jiffies;
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}
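
/*
 * Sorting the holders by lock number gives every caller the same global
 * acquisition order, which is what makes the multi-glock acquire above
 * deadlock free: two tasks can never each hold one of the glocks while
 * waiting for the other's.
 */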

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int *e;
	unsigned int x;
	int borked = 0, serious = 0;
	int error = 0;

	if (!num_gh)
		return 0;

	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	}

	e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	for (x = 0; x < num_gh; x++) {
		ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
		error = gfs2_glock_nq(&ghs[x]);
		if (error) {
			borked = 1;
			serious = error;
			num_gh = x;
			break;
		}
	}

	for (x = 0; x < num_gh; x++) {
		error = e[x] = glock_wait_internal(&ghs[x]);
		if (error) {
			borked = 1;
			if (error != GLR_TRYFAILED && error != GLR_CANCELED)
				serious = error;
		}
	}

	if (!borked) {
		kfree(e);
		return 0;
	}

	for (x = 0; x < num_gh; x++)
		if (!e[x])
			gfs2_glock_dq(&ghs[x]);

	if (serious)
		error = serious;
	else {
		for (x = 0; x < num_gh; x++)
			gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
					   &ghs[x]);
		error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
	}

	kfree(e);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release and uninitialize multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_lvb_hold - attach a LVB from a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
	int error;

	gfs2_glmutex_lock(gl);

	if (!atomic_read(&gl->gl_lvb_count)) {
		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
		if (error) {
			gfs2_glmutex_unlock(gl);
			return error;
		}
		gfs2_glock_hold(gl);
	}
	atomic_inc(&gl->gl_lvb_count);

	gfs2_glmutex_unlock(gl);

	return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	gfs2_glock_put(gl);
}

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
			unsigned int state)
{
	struct gfs2_glock *gl;

	gl = gfs2_glock_find(sdp, name);
	if (!gl)
		return;

	handle_callback(gl, state);

	spin_lock(&gl->gl_spin);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @cb_data: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
	struct gfs2_sbd *sdp = cb_data;

	switch (type) {
	case LM_CB_NEED_E:
		blocking_cb(sdp, data, LM_ST_UNLOCKED);
		return;

	case LM_CB_NEED_D:
		blocking_cb(sdp, data, LM_ST_DEFERRED);
		return;

	case LM_CB_NEED_S:
		blocking_cb(sdp, data, LM_ST_SHARED);
		return;

	case LM_CB_ASYNC: {
		struct lm_async_cb *async = data;
		struct gfs2_glock *gl;

		down_read(&gfs2_umount_flush_sem);
		gl = gfs2_glock_find(sdp, &async->lc_name);
		if (gfs2_assert_warn(sdp, gl))
			return;
		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
			gl->gl_req_bh(gl, async->lc_ret);
		gfs2_glock_put(gl);
		up_read(&gfs2_umount_flush_sem);
		return;
	}

	case LM_CB_NEED_RECOVERY:
		gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
		if (sdp->sd_recoverd_process)
			wake_up_process(sdp->sd_recoverd_process);
		return;

	case LM_CB_DROPLOCKS:
		gfs2_gl_hash_clear(sdp, NO_WAIT);
		gfs2_quota_scan(sdp);
		return;

	default:
		gfs2_assert_warn(sdp, 0);
		return;
	}
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int demote = 1;

	if (test_bit(GLF_STICKY, &gl->gl_flags))
		demote = 0;
	else if (glops->go_demote_ok)
		demote = glops->go_demote_ok(gl);

	return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the index of the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl, *prev = NULL;
	int has_entries = 0;
	struct hlist_head *head = &gl_hash_table[hash].hb_list;

	read_lock(gl_lock_addr(hash));
	/* Can't use hlist_for_each_entry - don't want prefetch here */
	if (hlist_empty(head))
		goto out;
	gl = list_entry(head->first, struct gfs2_glock, gl_list);
	while (1) {
		if (gl->gl_sbd == sdp) {
			gfs2_glock_hold(gl);
			read_unlock(gl_lock_addr(hash));
			if (prev)
				gfs2_glock_put(prev);
			prev = gl;
			examiner(gl);
			has_entries = 1;
			read_lock(gl_lock_addr(hash));
		}
		if (gl->gl_list.next == NULL)
			break;
		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
	}
out:
	read_unlock(gl_lock_addr(hash));
	if (prev)
		gfs2_glock_put(prev);
	return has_entries;
}
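
/*
 * Holding a reference on the current glock, and dropping the previous one
 * only after the chain lock has been retaken, is what lets examine_bucket()
 * release the hash chain rwlock while the examiner runs: the held glock
 * cannot be freed, so its gl_list.next pointer stays valid for continuing
 * the walk.
 */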

/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
		return;

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			goto out_schedule;
		gfs2_glmutex_unlock(gl);
	}
	return;

out_schedule:
	gfs2_glmutex_unlock(gl);
	gfs2_glock_schedule_for_reclaim(gl);
}

/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 *
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(scan_glock, sdp, x);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int released;

	spin_lock(&sdp->sd_reclaim_lock);
	if (!list_empty(&gl->gl_reclaim)) {
		list_del_init(&gl->gl_reclaim);
		atomic_dec(&sdp->sd_reclaim_count);
		spin_unlock(&sdp->sd_reclaim_lock);
		released = gfs2_glock_put(gl);
		gfs2_assert(sdp, !released);
	} else {
		spin_unlock(&sdp->sd_reclaim_lock);
	}

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED)
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
	unsigned long t;
	unsigned int x;
	int cont;

	t = jiffies;

	for (;;) {
		cont = 0;
		for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
			if (examine_bucket(clear_glock, sdp, x))
				cont = 1;
		}

		if (!wait || !cont)
			break;

		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		down_write(&gfs2_umount_flush_sem);
		invalidate_inodes(sdp->sd_vfs);
		up_write(&gfs2_umount_flush_sem);
		msleep(10);
	}
}

/*
 *  Diagnostic routines to help debug distributed deadlock
 */

static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
			      unsigned long address)
{
	/* when sprint_symbol becomes available in the new kernel, replace this */
	/* function with:
	char buffer[KSYM_SYMBOL_LEN];

	sprint_symbol(buffer, address);
	print_dbg(gi, fmt, buffer);
	*/
	char buffer[256];

	if (gi) {
		memset(buffer, 0, sizeof(buffer));
		sprintf(buffer, "%p", (void *)address);
		print_dbg(gi, fmt, buffer);
	} else
		print_symbol(fmt, address);
}

/**
 * dump_holder - print information about a glock holder
 * @gi: the glock iterator, or NULL to print to the console
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct glock_iter *gi, char *str,
		       struct gfs2_holder *gh)
{
	unsigned int x;
	struct task_struct *gh_owner;

	print_dbg(gi, "  %s\n", str);
	if (gh->gh_owner_pid) {
		print_dbg(gi, "    owner = %ld ", (long)gh->gh_owner_pid);
		gh_owner = find_task_by_pid(gh->gh_owner_pid);
		if (gh_owner)
			print_dbg(gi, "(%s)\n", gh_owner->comm);
		else
			print_dbg(gi, "(ended)\n");
	} else
		print_dbg(gi, "    owner = -1\n");
	print_dbg(gi, "    gh_state = %u\n", gh->gh_state);
	print_dbg(gi, "    gh_flags =");
	for (x = 0; x < 32; x++)
		if (gh->gh_flags & (1 << x))
			print_dbg(gi, " %u", x);
	print_dbg(gi, " \n");
	print_dbg(gi, "    error = %d\n", gh->gh_error);
	print_dbg(gi, "    gh_iflags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gh->gh_iflags))
			print_dbg(gi, " %u", x);
	print_dbg(gi, " \n");
	gfs2_print_symbol(gi, "    initialized at: %s\n", gh->gh_ip);

	return 0;
}

/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
{
	unsigned int x;

	print_dbg(gi, "  Inode:\n");
	print_dbg(gi, "    num = %llu/%llu\n",
		  ip->i_num.no_formal_ino, ip->i_num.no_addr);
	print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
	print_dbg(gi, "    i_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &ip->i_flags))
			print_dbg(gi, " %u", x);
	print_dbg(gi, " \n");

	return 0;
}
/**
* dump_glock - print information about a glock
* @ gl : the glock
* @ count : where we are in the buffer
*
* Returns : 0 on success , - ENOBUFS when we run out of space
*/
2007-03-16 13:26:37 +03:00
static int dump_glock ( struct glock_iter * gi , struct gfs2_glock * gl )
2006-01-16 19:50:04 +03:00
{
struct gfs2_holder * gh ;
unsigned int x ;
int error = - ENOBUFS ;
2007-03-24 01:05:15 +03:00
struct task_struct * gl_owner ;
2006-01-16 19:50:04 +03:00
spin_lock ( & gl - > gl_spin ) ;
2007-03-16 13:26:37 +03:00
print_dbg ( gi , " Glock 0x%p (%u, %llu) \n " , gl , gl - > gl_name . ln_type ,
( unsigned long long ) gl - > gl_name . ln_number ) ;
print_dbg ( gi , " gl_flags = " ) ;
2006-09-07 22:40:21 +04:00
for ( x = 0 ; x < 32 ; x + + ) {
2006-01-16 19:50:04 +03:00
if ( test_bit ( x , & gl - > gl_flags ) )
2007-03-16 13:26:37 +03:00
print_dbg ( gi , " %u " , x ) ;
}
2007-03-24 01:05:15 +03:00
if ( ! test_bit ( GLF_LOCK , & gl - > gl_flags ) )
print_dbg ( gi , " (unlocked) " ) ;
2007-03-16 13:26:37 +03:00
print_dbg ( gi , " \n " ) ;
print_dbg ( gi , " gl_ref = %d \n " , atomic_read ( & gl - > gl_ref ) ) ;
print_dbg ( gi , " gl_state = %u \n " , gl - > gl_state ) ;
2007-03-24 01:05:15 +03:00
if ( gl - > gl_owner_pid ) {
gl_owner = find_task_by_pid ( gl - > gl_owner_pid ) ;
if ( gl_owner )
print_dbg ( gi , " gl_owner = pid %d (%s) \n " ,
gl - > gl_owner_pid , gl_owner - > comm ) ;
else
print_dbg ( gi , " gl_owner = %d (ended) \n " ,
gl - > gl_owner_pid ) ;
} else
print_dbg ( gi , " gl_owner = -1 \n " ) ;
2007-03-16 13:26:37 +03:00
print_dbg ( gi , " gl_ip = %lu \n " , gl - > gl_ip ) ;
print_dbg ( gi , " req_gh = %s \n " , ( gl - > gl_req_gh ) ? " yes " : " no " ) ;
print_dbg ( gi , " req_bh = %s \n " , ( gl - > gl_req_bh ) ? " yes " : " no " ) ;
print_dbg ( gi , " lvb_count = %d \n " , atomic_read ( & gl - > gl_lvb_count ) ) ;
print_dbg ( gi , " object = %s \n " , ( gl - > gl_object ) ? " yes " : " no " ) ;
print_dbg ( gi , " le = %s \n " ,
2006-01-16 19:50:04 +03:00
( list_empty ( & gl - > gl_le . le_list ) ) ? " no " : " yes " ) ;
2007-03-16 13:26:37 +03:00
print_dbg ( gi , " reclaim = %s \n " ,
( list_empty ( & gl - > gl_reclaim ) ) ? " no " : " yes " ) ;
2006-01-16 19:50:04 +03:00
if ( gl - > gl_aspace )
2007-03-16 13:26:37 +03:00
print_dbg ( gi , " aspace = 0x%p nrpages = %lu \n " , gl - > gl_aspace ,
gl - > gl_aspace - > i_mapping - > nrpages ) ;
2006-01-16 19:50:04 +03:00
else
2007-03-16 13:26:37 +03:00
print_dbg ( gi , " aspace = no \n " ) ;
print_dbg ( gi , " ail = %d \n " , atomic_read ( & gl - > gl_ail_count ) ) ;
2006-01-16 19:50:04 +03:00
if ( gl - > gl_req_gh ) {
2007-03-16 13:26:37 +03:00
error = dump_holder ( gi , " Request " , gl - > gl_req_gh ) ;
2006-01-16 19:50:04 +03:00
if ( error )
goto out ;
}
list_for_each_entry ( gh , & gl - > gl_holders , gh_list ) {
2007-03-16 13:26:37 +03:00
error = dump_holder ( gi , " Holder " , gh ) ;
2006-01-16 19:50:04 +03:00
if ( error )
goto out ;
}
list_for_each_entry ( gh , & gl - > gl_waiters1 , gh_list ) {
2007-03-16 13:26:37 +03:00
error = dump_holder ( gi , " Waiter1 " , gh ) ;
2006-01-16 19:50:04 +03:00
if ( error )
goto out ;
}
list_for_each_entry ( gh , & gl - > gl_waiters3 , gh_list ) {
2007-03-16 13:26:37 +03:00
error = dump_holder ( gi , " Waiter3 " , gh ) ;
2006-01-16 19:50:04 +03:00
if ( error )
goto out ;
}
2007-03-16 12:40:31 +03:00
if ( test_bit ( GLF_DEMOTE , & gl - > gl_flags ) ) {
print_dbg ( gi , " Demotion req to state %u (%llu uS ago) \n " ,
gl - > gl_demote_state ,
2007-03-18 19:05:27 +03:00
( u64 ) ( jiffies - gl - > gl_demote_time ) * ( 1000000 / HZ ) ) ;
2007-03-16 12:40:31 +03:00
}
2006-02-28 01:23:27 +03:00
if ( gl - > gl_ops = = & gfs2_inode_glops & & gl - > gl_object ) {
2006-01-16 19:50:04 +03:00
if ( ! test_bit ( GLF_LOCK , & gl - > gl_flags ) & &
2007-03-16 13:26:37 +03:00
list_empty ( & gl - > gl_holders ) ) {
error = dump_inode ( gi , gl - > gl_object ) ;
2006-01-16 19:50:04 +03:00
if ( error )
goto out ;
} else {
error = - ENOBUFS ;
2007-03-16 13:26:37 +03:00
print_dbg ( gi , " Inode: busy \n " ) ;
2006-01-16 19:50:04 +03:00
}
}
error = 0 ;
2006-09-04 20:04:26 +04:00
out :
2006-01-16 19:50:04 +03:00
spin_unlock ( & gl - > gl_spin ) ;
return error ;
}
/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the state of every glock belonging to @sdp to the console.
 *
 * Returns: 0 on success, or the first error returned by dump_glock()
 */
2006-04-28 18:59:12 +04:00
static int gfs2_dump_lockstate ( struct gfs2_sbd * sdp )
2006-01-16 19:50:04 +03:00
{
struct gfs2_glock * gl ;
2006-09-12 18:10:01 +04:00
struct hlist_node * h ;
2006-01-16 19:50:04 +03:00
unsigned int x ;
int error = 0 ;
for ( x = 0 ; x < GFS2_GL_HASH_SIZE ; x + + ) {
2006-09-10 00:59:11 +04:00
read_lock ( gl_lock_addr ( x ) ) ;
2006-01-16 19:50:04 +03:00
2006-09-12 18:10:01 +04:00
hlist_for_each_entry ( gl , h , & gl_hash_table [ x ] . hb_list , gl_list ) {
2006-09-07 22:40:21 +04:00
if ( gl - > gl_sbd ! = sdp )
continue ;
2006-01-16 19:50:04 +03:00
2007-03-16 13:26:37 +03:00
error = dump_glock ( NULL , gl ) ;
2006-01-16 19:50:04 +03:00
if ( error )
break ;
}
2006-09-10 00:59:11 +04:00
read_unlock ( gl_lock_addr ( x ) ) ;
2006-01-16 19:50:04 +03:00
if ( error )
break ;
}
return error ;
}
2006-09-07 22:40:21 +04:00
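/**
 * gfs2_glock_init - initialise the glock hash table heads and bucket locks
 *
 * Called once during module initialisation, before any glocks exist.
 */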
int __init gfs2_glock_init ( void )
{
unsigned i ;
for ( i = 0 ; i < GFS2_GL_HASH_SIZE ; i + + ) {
2006-09-12 18:10:01 +04:00
INIT_HLIST_HEAD ( & gl_hash_table [ i ] . hb_list ) ;
2006-09-07 22:40:21 +04:00
}
2006-09-10 00:59:11 +04:00
# ifdef GL_HASH_LOCK_SZ
for ( i = 0 ; i < GL_HASH_LOCK_SZ ; i + + ) {
rwlock_init ( & gl_hash_locks [ i ] ) ;
}
# endif
2006-09-07 22:40:21 +04:00
return 0 ;
}
2007-03-16 13:26:37 +03:00
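/**
 * gfs2_glock_iter_next - advance the iterator to the next glock
 * @gi: the glock iterator
 *
 * Walks the global glock hash table, holding the read lock for the bucket
 * currently being scanned and moving on to the next bucket once the
 * current chain is exhausted.
 *
 * Returns: 0 if gi->gl now points at a glock, 1 once the whole table has
 *          been traversed
 */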
static int gfs2_glock_iter_next ( struct glock_iter * gi )
{
2007-04-17 20:37:11 +04:00
read_lock ( gl_lock_addr ( gi - > hash ) ) ;
2007-03-16 13:26:37 +03:00
while ( 1 ) {
if ( ! gi - > hb_list ) { /* If we don't have a hash bucket yet */
gi - > hb_list = & gl_hash_table [ gi - > hash ] . hb_list ;
if ( hlist_empty ( gi - > hb_list ) ) {
2007-04-17 20:37:11 +04:00
read_unlock ( gl_lock_addr ( gi - > hash ) ) ;
2007-03-16 13:26:37 +03:00
gi - > hash + + ;
2007-04-17 20:37:11 +04:00
read_lock ( gl_lock_addr ( gi - > hash ) ) ;
2007-03-16 13:26:37 +03:00
gi - > hb_list = NULL ;
2007-04-17 20:37:11 +04:00
if ( gi - > hash > = GFS2_GL_HASH_SIZE ) {
read_unlock ( gl_lock_addr ( gi - > hash ) ) ;
2007-03-16 13:26:37 +03:00
return 1 ;
2007-04-17 20:37:11 +04:00
}
2007-03-16 13:26:37 +03:00
else
continue ;
}
if ( ! hlist_empty ( gi - > hb_list ) ) {
gi - > gl = list_entry ( gi - > hb_list - > first ,
struct gfs2_glock ,
gl_list ) ;
}
} else {
			if (gi->gl->gl_list.next == NULL) {
				read_unlock(gl_lock_addr(gi->hash));
				gi->hash++;
				read_lock(gl_lock_addr(gi->hash));
				gi->hb_list = NULL;
				/* Don't run off the end of gl_hash_table[]
				   when the last chain has been exhausted */
				if (gi->hash >= GFS2_GL_HASH_SIZE) {
					read_unlock(gl_lock_addr(gi->hash));
					return 1;
				}
				continue;
			}
gi - > gl = list_entry ( gi - > gl - > gl_list . next ,
struct gfs2_glock , gl_list ) ;
}
if ( gi - > gl )
break ;
}
2007-04-17 20:37:11 +04:00
read_unlock ( gl_lock_addr ( gi - > hash ) ) ;
2007-03-16 13:26:37 +03:00
return 0 ;
}
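/* Free an iterator obtained from gfs2_glock_iter_init() */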
static void gfs2_glock_iter_free ( struct glock_iter * gi )
{
kfree ( gi ) ;
}
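/**
 * gfs2_glock_iter_init - allocate and position a glock iterator
 * @sdp: the filesystem the debugfs file was opened against
 *
 * Returns: the new iterator, positioned at the first glock in the hash
 *          table, or NULL on allocation failure or if the table is empty
 */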
static struct glock_iter * gfs2_glock_iter_init ( struct gfs2_sbd * sdp )
{
struct glock_iter * gi ;
gi = kmalloc ( sizeof ( * gi ) , GFP_KERNEL ) ;
if ( ! gi )
return NULL ;
gi - > sdp = sdp ;
gi - > hash = 0 ;
gi - > gl = NULL ;
gi - > hb_list = NULL ;
gi - > seq = NULL ;
memset ( gi - > string , 0 , sizeof ( gi - > string ) ) ;
if ( gfs2_glock_iter_next ( gi ) ) {
gfs2_glock_iter_free ( gi ) ;
return NULL ;
}
return gi ;
}
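/*
 * seq_file ->start operation: allocate a fresh iterator and step it
 * forward *pos times so that successive reads resume at the right glock.
 */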
static void * gfs2_glock_seq_start ( struct seq_file * file , loff_t * pos )
{
struct glock_iter * gi ;
loff_t n = * pos ;
gi = gfs2_glock_iter_init ( file - > private ) ;
if ( ! gi )
return NULL ;
while ( n - - ) {
if ( gfs2_glock_iter_next ( gi ) ) {
gfs2_glock_iter_free ( gi ) ;
return NULL ;
}
}
return gi ;
}
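/*
 * seq_file ->next operation: advance to the next glock; the iterator is
 * freed once the end of the hash table is reached.
 */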
static void * gfs2_glock_seq_next ( struct seq_file * file , void * iter_ptr ,
loff_t * pos )
{
struct glock_iter * gi = iter_ptr ;
( * pos ) + + ;
if ( gfs2_glock_iter_next ( gi ) ) {
gfs2_glock_iter_free ( gi ) ;
return NULL ;
}
return gi ;
}
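/* seq_file ->stop operation: no per-read cleanup is needed here */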
static void gfs2_glock_seq_stop ( struct seq_file * file , void * iter_ptr )
{
/* nothing for now */
}
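/* seq_file ->show operation: dump the glock the iterator points at */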
static int gfs2_glock_seq_show ( struct seq_file * file , void * iter_ptr )
{
struct glock_iter * gi = iter_ptr ;
gi - > seq = file ;
dump_glock ( gi , gi - > gl ) ;
return 0 ;
}
static struct seq_operations gfs2_glock_seq_ops = {
. start = gfs2_glock_seq_start ,
. next = gfs2_glock_seq_next ,
. stop = gfs2_glock_seq_stop ,
. show = gfs2_glock_seq_show ,
} ;
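/*
 * Open method for the debugfs glock file: wire up the seq_file iterator
 * and remember the superblock pointer stored in the inode's i_private.
 */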
static int gfs2_debugfs_open ( struct inode * inode , struct file * file )
{
struct seq_file * seq ;
int ret ;
ret = seq_open ( file , & gfs2_glock_seq_ops ) ;
if ( ret )
return ret ;
seq = file - > private_data ;
seq - > private = inode - > i_private ;
return 0 ;
}
static const struct file_operations gfs2_debug_fops = {
. owner = THIS_MODULE ,
. open = gfs2_debugfs_open ,
. read = seq_read ,
. llseek = seq_lseek ,
. release = seq_release
} ;
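/**
 * gfs2_create_debugfs_file - create a debugfs glock dump file for @sdp
 * @sdp: the filesystem
 *
 * The read-only file is created under the "gfs2" debugfs directory and
 * is named after the filesystem's lock table.
 *
 * Returns: 0 on success, -ENOMEM if the file could not be created
 */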
int gfs2_create_debugfs_file ( struct gfs2_sbd * sdp )
{
sdp - > debugfs_dentry = debugfs_create_file ( sdp - > sd_table_name ,
S_IFREG | S_IRUGO ,
gfs2_root , sdp ,
& gfs2_debug_fops ) ;
if ( ! sdp - > debugfs_dentry )
return - ENOMEM ;
return 0 ;
}
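/* Remove the debugfs file created by gfs2_create_debugfs_file(), if any */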
void gfs2_delete_debugfs_file ( struct gfs2_sbd * sdp )
{
2007-03-08 01:09:10 +03:00
if ( sdp & & sdp - > debugfs_dentry )
2007-03-16 13:26:37 +03:00
debugfs_remove ( sdp - > debugfs_dentry ) ;
}
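/**
 * gfs2_register_debugfs - create the top level "gfs2" debugfs directory
 *
 * Returns: 0 on success, -ENOMEM if the directory could not be created
 */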
int gfs2_register_debugfs ( void )
{
gfs2_root = debugfs_create_dir ( " gfs2 " , NULL ) ;
return gfs2_root ? 0 : - ENOMEM ;
}
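/* Remove the top level "gfs2" debugfs directory */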
void gfs2_unregister_debugfs ( void )
{
debugfs_remove ( gfs2_root ) ;
}