/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
# ifndef __GLOCK_DOT_H__
# define __GLOCK_DOT_H__
/*
 * NOTE(review): the changelog text below was found pasted verbatim into
 * this header.  It describes an unrelated mm.h/sched.h patch, is not valid
 * C, and should simply be deleted; it is preserved as a comment only so the
 * file remains well-formed until that cleanup happens.
 *
 * Detach sched.h from mm.h
 * First thing mm.h does is including sched.h solely for can_do_mlock() inline
 * function which has "current" dereference inside. By dealing with can_do_mlock()
 * mm.h can be detached from sched.h which is good. See below, why.
 * This patch
 * a) removes unconditional inclusion of sched.h from mm.h
 * b) makes can_do_mlock() normal function in mm/mlock.c
 * c) exports can_do_mlock() to not break compilation
 * d) adds sched.h inclusions back to files that were getting it indirectly.
 * e) adds less bloated headers to some files (asm/signal.h, jiffies.h) that were
 * getting them indirectly
 * Net result is:
 * a) mm.h users would get less code to open, read, preprocess, parse, ... if
 * they don't need sched.h
 * b) sched.h stops being dependency for significant number of files:
 * on x86_64 allmodconfig touching sched.h results in recompile of 4083 files,
 * after patch it's only 3744 (-8.3%).
 * Cross-compile tested on
 * all arm defconfigs, all mips defconfigs, all powerpc defconfigs,
 * alpha alpha-up
 * arm
 * i386 i386-up i386-defconfig i386-allnoconfig
 * ia64 ia64-up
 * m68k
 * mips
 * parisc parisc-up
 * powerpc powerpc-up
 * s390 s390-up
 * sparc sparc-up
 * sparc64 sparc64-up
 * um-x86_64
 * x86_64 x86_64-up x86_64-defconfig x86_64-allnoconfig
 * as well as my two usual configs.
 * Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
 * Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 */
#include <linux/sched.h>
#include <linux/parser.h>

#include "incore.h"
/* Options for hostdata parser */

enum {
	Opt_jid,
	Opt_id,
	Opt_first,
	Opt_nodir,
	Opt_err,
};
/*
 * lm_lockname types
 */

#define LM_TYPE_RESERVED	0x00
#define LM_TYPE_NONDISK		0x01
#define LM_TYPE_INODE		0x02
#define LM_TYPE_RGRP		0x03
#define LM_TYPE_META		0x04
#define LM_TYPE_IOPEN		0x05
#define LM_TYPE_FLOCK		0x06
#define LM_TYPE_PLOCK		0x07
#define LM_TYPE_QUOTA		0x08
#define LM_TYPE_JOURNAL		0x09
/*
 * lm_lock() states
 *
 * SHARED is compatible with SHARED, not with DEFERRED or EX.
 * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
 */

#define LM_ST_UNLOCKED		0
#define LM_ST_EXCLUSIVE		1
#define LM_ST_DEFERRED		2
#define LM_ST_SHARED		3
/*
 * lm_lock() flags
 *
 * LM_FLAG_TRY
 * Don't wait to acquire the lock if it can't be granted immediately.
 *
 * LM_FLAG_TRY_1CB
 * Send one blocking callback if TRY is set and the lock is not granted.
 *
 * LM_FLAG_NOEXP
 * GFS sets this flag on lock requests it makes while doing journal recovery.
 * These special requests should not be blocked due to the recovery like
 * ordinary locks would be.
 *
 * LM_FLAG_ANY
 * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
 * also be granted in SHARED.  The preferred state is whichever is compatible
 * with other granted locks, or the specified state if no other locks exist.
 *
 * LM_FLAG_PRIORITY
 * Override fairness considerations.  Suppose a lock is held in a shared state
 * and there is a pending request for the deferred state.  A shared lock
 * request with the priority flag would be allowed to bypass the deferred
 * request and directly join the other shared lock.  A shared lock request
 * without the priority flag might be forced to wait until the deferred
 * requested had acquired and released the lock.
 */

#define LM_FLAG_TRY		0x00000001
#define LM_FLAG_TRY_1CB		0x00000002
#define LM_FLAG_NOEXP		0x00000004
#define LM_FLAG_ANY		0x00000008
#define LM_FLAG_PRIORITY	0x00000010

#define GL_ASYNC		0x00000040
#define GL_EXACT		0x00000080
#define GL_SKIP			0x00000100
#define GL_ATIME		0x00000200
#define GL_NOCACHE		0x00000400
/*
 * lm_lock() and lm_async_cb return flags
 *
 * LM_OUT_ST_MASK
 * Masks the lower two bits of lock state in the returned value.
 *
 * LM_OUT_CANCELED
 * The lock request was canceled.
 *
 * LM_OUT_ASYNC
 * The result of the request will be returned in an LM_CB_ASYNC callback.
 */

#define LM_OUT_ST_MASK		0x00000003
#define LM_OUT_CANCELED		0x00000008
#define LM_OUT_ASYNC		0x00000080
#define LM_OUT_ERROR		0x00000100

/*
 * lm_recovery_done() messages
 */

#define LM_RD_GAVEUP		308
#define LM_RD_SUCCESS		309

#define GLR_TRYFAILED		13
/*
 * Operations vector a lock module (e.g. the DLM backend) exports to glock
 * core: mount/unmount hooks, per-lock request/cancel/release entry points,
 * and the mount-option token table used by the hostdata parser.
 */
struct lm_lockops {
	const char *lm_proto_name;
	int (*lm_mount)(struct gfs2_sbd *sdp, const char *fsname);
	void (*lm_unmount)(struct gfs2_sbd *sdp);
	void (*lm_withdraw)(struct gfs2_sbd *sdp);
	void (*lm_put_lock)(struct kmem_cache *cachep, void *gl);
	unsigned int (*lm_lock)(struct gfs2_glock *gl,
				unsigned int req_state, unsigned int flags);
	void (*lm_cancel)(struct gfs2_glock *gl);
	const match_table_t *lm_tokens;
};
/*
 * NOTE(review): this span previously repeated the LM_FLAG_*, GL_* and
 * GLR_TRYFAILED definitions verbatim.  The canonical definitions appear
 * earlier in this header, so the duplicates have been dropped.
 */
2009-07-24 03:52:34 +04:00
extern struct workqueue_struct * gfs2_delete_workqueue ;
2008-02-22 19:07:18 +03:00
static inline struct gfs2_holder * gfs2_glock_is_locked_by_me ( struct gfs2_glock * gl )
2006-01-16 19:50:04 +03:00
{
struct gfs2_holder * gh ;
2008-02-07 11:13:19 +03:00
struct pid * pid ;
2006-01-16 19:50:04 +03:00
/* Look in glock's list of holders for one with current task as owner */
spin_lock ( & gl - > gl_spin ) ;
2008-02-07 11:13:19 +03:00
pid = task_pid ( current ) ;
2006-01-16 19:50:04 +03:00
list_for_each_entry ( gh , & gl - > gl_holders , gh_list ) {
2008-05-21 20:03:22 +04:00
if ( ! test_bit ( HIF_HOLDER , & gh - > gh_iflags ) )
break ;
2008-02-22 19:07:18 +03:00
if ( gh - > gh_owner_pid = = pid )
goto out ;
2006-01-16 19:50:04 +03:00
}
2008-02-22 19:07:18 +03:00
gh = NULL ;
out :
2006-01-16 19:50:04 +03:00
spin_unlock ( & gl - > gl_spin ) ;
2008-02-22 19:07:18 +03:00
return gh ;
2006-01-16 19:50:04 +03:00
}
static inline int gfs2_glock_is_held_excl ( struct gfs2_glock * gl )
{
2006-09-04 17:49:55 +04:00
return gl - > gl_state = = LM_ST_EXCLUSIVE ;
2006-01-16 19:50:04 +03:00
}
static inline int gfs2_glock_is_held_dfrd ( struct gfs2_glock * gl )
{
2006-09-04 17:49:55 +04:00
return gl - > gl_state = = LM_ST_DEFERRED ;
2006-01-16 19:50:04 +03:00
}
static inline int gfs2_glock_is_held_shrd ( struct gfs2_glock * gl )
{
2006-09-04 17:49:55 +04:00
return gl - > gl_state = = LM_ST_SHARED ;
2006-01-16 19:50:04 +03:00
}
static inline int gfs2_glock_is_blocking ( struct gfs2_glock * gl )
{
int ret ;
spin_lock ( & gl - > gl_spin ) ;
2008-05-21 20:03:22 +04:00
ret = test_bit ( GLF_DEMOTE , & gl - > gl_flags ) ;
2006-01-16 19:50:04 +03:00
spin_unlock ( & gl - > gl_spin ) ;
return ret ;
}
int gfs2_glock_get ( struct gfs2_sbd * sdp ,
2006-09-04 20:49:07 +04:00
u64 number , const struct gfs2_glock_operations * glops ,
2006-01-16 19:50:04 +03:00
int create , struct gfs2_glock * * glp ) ;
2009-07-24 03:52:34 +04:00
void gfs2_glock_hold ( struct gfs2_glock * gl ) ;
void gfs2_glock_put_nolock ( struct gfs2_glock * gl ) ;
2006-01-16 19:50:04 +03:00
int gfs2_glock_put ( struct gfs2_glock * gl ) ;
2006-04-21 00:57:23 +04:00
void gfs2_holder_init ( struct gfs2_glock * gl , unsigned int state , unsigned flags ,
2006-01-16 19:50:04 +03:00
struct gfs2_holder * gh ) ;
2006-04-21 00:57:23 +04:00
void gfs2_holder_reinit ( unsigned int state , unsigned flags ,
struct gfs2_holder * gh ) ;
2006-01-16 19:50:04 +03:00
void gfs2_holder_uninit ( struct gfs2_holder * gh ) ;
int gfs2_glock_nq ( struct gfs2_holder * gh ) ;
int gfs2_glock_poll ( struct gfs2_holder * gh ) ;
int gfs2_glock_wait ( struct gfs2_holder * gh ) ;
void gfs2_glock_dq ( struct gfs2_holder * gh ) ;
2007-06-11 11:22:32 +04:00
void gfs2_glock_dq_wait ( struct gfs2_holder * gh ) ;
2006-01-16 19:50:04 +03:00
void gfs2_glock_dq_uninit ( struct gfs2_holder * gh ) ;
int gfs2_glock_nq_num ( struct gfs2_sbd * sdp ,
2006-09-04 20:49:07 +04:00
u64 number , const struct gfs2_glock_operations * glops ,
2006-01-16 19:50:04 +03:00
unsigned int state , int flags , struct gfs2_holder * gh ) ;
int gfs2_glock_nq_m ( unsigned int num_gh , struct gfs2_holder * ghs ) ;
void gfs2_glock_dq_m ( unsigned int num_gh , struct gfs2_holder * ghs ) ;
void gfs2_glock_dq_uninit_m ( unsigned int num_gh , struct gfs2_holder * ghs ) ;
2008-05-21 20:03:22 +04:00
void gfs2_print_dbg ( struct seq_file * seq , const char * fmt , . . . ) ;
2006-01-16 19:50:04 +03:00
2006-03-29 23:36:49 +04:00
/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * On enqueue failure the holder is uninitialized again, so the caller
 * never has to clean up after an error.
 *
 * Returns: 0, GLR_*, or errno
 */
static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
				     unsigned int state, int flags,
				     struct gfs2_holder *gh)
{
	int error;

	gfs2_holder_init(gl, state, flags, gh);

	error = gfs2_glock_nq(gh);
	if (error)
		gfs2_holder_uninit(gh);

	return error;
}
2006-01-16 19:50:04 +03:00
/* Lock Value Block functions */
int gfs2_lvb_hold ( struct gfs2_glock * gl ) ;
void gfs2_lvb_unhold ( struct gfs2_glock * gl ) ;
2009-01-12 13:43:39 +03:00
void gfs2_glock_cb ( struct gfs2_glock * gl , unsigned int state ) ;
void gfs2_glock_complete ( struct gfs2_glock * gl , int ret ) ;
2006-01-16 19:50:04 +03:00
void gfs2_reclaim_glock ( struct gfs2_sbd * sdp ) ;
2008-12-19 18:32:06 +03:00
void gfs2_gl_hash_clear ( struct gfs2_sbd * sdp ) ;
2008-11-18 16:38:48 +03:00
void gfs2_glock_finish_truncate ( struct gfs2_inode * ip ) ;
2009-01-12 13:43:39 +03:00
void gfs2_glock_thaw ( struct gfs2_sbd * sdp ) ;
2006-01-16 19:50:04 +03:00
2006-09-07 22:40:21 +04:00
int __init gfs2_glock_init ( void ) ;
2007-08-01 16:57:10 +04:00
void gfs2_glock_exit ( void ) ;
2007-03-16 13:26:37 +03:00
int gfs2_create_debugfs_file ( struct gfs2_sbd * sdp ) ;
void gfs2_delete_debugfs_file ( struct gfs2_sbd * sdp ) ;
int gfs2_register_debugfs ( void ) ;
void gfs2_unregister_debugfs ( void ) ;
2006-09-07 22:40:21 +04:00
2009-01-12 13:43:39 +03:00
extern const struct lm_lockops gfs2_dlm_ops ;
2006-01-16 19:50:04 +03:00
# endif /* __GLOCK_DOT_H__ */