/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"

uint64_t vn_generation;		/* vnode generation number, handed out in
				 * vn_initialize(); never zero */
DEFINE_SPINLOCK(vnumber_lock);	/* serializes updates of vn_generation */

/*
 * Dedicated vnode inactive/reclaim sync wait queues.
 * Prime number of hash buckets since address is used as the key.
 */
#define NVSYNC			37
#define vptosync(v)		(&vsync[((unsigned long)v) % NVSYNC])
static wait_queue_head_t vsync[NVSYNC];
2005-04-16 15:20:36 -07:00
void
vn_init ( void )
{
2005-09-02 16:58:38 +10:00
int i ;
2005-04-16 15:20:36 -07:00
2005-09-02 16:58:38 +10:00
for ( i = 0 ; i < NVSYNC ; i + + )
init_waitqueue_head ( & vsync [ i ] ) ;
}
void
vn_iowait (
2006-06-09 17:00:52 +10:00
bhv_vnode_t * vp )
2005-09-02 16:58:38 +10:00
{
wait_queue_head_t * wq = vptosync ( vp ) ;
wait_event ( * wq , ( atomic_read ( & vp - > v_iocount ) = = 0 ) ) ;
}
void
vn_iowake (
2006-06-09 17:00:52 +10:00
bhv_vnode_t * vp )
2005-09-02 16:58:38 +10:00
{
if ( atomic_dec_and_test ( & vp - > v_iocount ) )
wake_up ( vptosync ( vp ) ) ;
2005-04-16 15:20:36 -07:00
}
2006-06-09 14:58:38 +10:00
/*
* Volume managers supporting multiple paths can send back ENODEV when the
* final path disappears . In this case continuing to fill the page cache
* with dirty data which cannot be written out is evil , so prevent that .
*/
void
vn_ioerror (
2006-06-09 17:00:52 +10:00
bhv_vnode_t * vp ,
2006-06-09 14:58:38 +10:00
int error ,
char * f ,
int l )
{
if ( unlikely ( error = = - ENODEV ) )
2006-06-09 16:48:30 +10:00
bhv_vfs_force_shutdown ( vp - > v_vfsp , SHUTDOWN_DEVICE_REQ , f , l ) ;
2006-06-09 14:58:38 +10:00
}
2006-06-09 17:00:52 +10:00
bhv_vnode_t *
2005-04-16 15:20:36 -07:00
vn_initialize (
struct inode * inode )
{
2006-06-09 17:00:52 +10:00
bhv_vnode_t * vp = vn_from_inode ( inode ) ;
2005-04-16 15:20:36 -07:00
XFS_STATS_INC ( vn_active ) ;
XFS_STATS_INC ( vn_alloc ) ;
vp - > v_flag = VMODIFIED ;
spinlock_init ( & vp - > v_lock , " v_lock " ) ;
spin_lock ( & vnumber_lock ) ;
if ( ! + + vn_generation ) /* v_number shouldn't be zero */
vn_generation + + ;
vp - > v_number = vn_generation ;
spin_unlock ( & vnumber_lock ) ;
ASSERT ( VN_CACHED ( vp ) = = 0 ) ;
/* Initialize the first behavior and the behavior chain head. */
vn_bhv_head_init ( VN_BHV_HEAD ( vp ) , " vnode " ) ;
2005-09-02 16:58:38 +10:00
atomic_set ( & vp - > v_iocount , 0 ) ;
2005-04-16 15:20:36 -07:00
# ifdef XFS_VNODE_TRACE
vp - > v_trace = ktrace_alloc ( VNODE_TRACE_SIZE , KM_SLEEP ) ;
# endif /* XFS_VNODE_TRACE */
2006-03-14 13:33:36 +11:00
vn_trace_exit ( vp , __FUNCTION__ , ( inst_t * ) __return_address ) ;
2005-04-16 15:20:36 -07:00
return vp ;
}
/*
* Revalidate the Linux inode from the vattr .
* Note : i_size _not_ updated ; we must hold the inode
* semaphore when doing that - callers responsibility .
*/
void
vn_revalidate_core (
2006-06-09 17:00:52 +10:00
bhv_vnode_t * vp ,
2006-06-09 17:07:12 +10:00
bhv_vattr_t * vap )
2005-04-16 15:20:36 -07:00
{
2006-03-17 17:25:36 +11:00
struct inode * inode = vn_to_inode ( vp ) ;
2005-04-16 15:20:36 -07:00
2005-09-02 16:46:51 +10:00
inode - > i_mode = vap - > va_mode ;
2005-04-16 15:20:36 -07:00
inode - > i_nlink = vap - > va_nlink ;
inode - > i_uid = vap - > va_uid ;
inode - > i_gid = vap - > va_gid ;
inode - > i_blocks = vap - > va_nblocks ;
inode - > i_mtime = vap - > va_mtime ;
inode - > i_ctime = vap - > va_ctime ;
if ( vap - > va_xflags & XFS_XFLAG_IMMUTABLE )
inode - > i_flags | = S_IMMUTABLE ;
else
inode - > i_flags & = ~ S_IMMUTABLE ;
if ( vap - > va_xflags & XFS_XFLAG_APPEND )
inode - > i_flags | = S_APPEND ;
else
inode - > i_flags & = ~ S_APPEND ;
if ( vap - > va_xflags & XFS_XFLAG_SYNC )
inode - > i_flags | = S_SYNC ;
else
inode - > i_flags & = ~ S_SYNC ;
if ( vap - > va_xflags & XFS_XFLAG_NOATIME )
inode - > i_flags | = S_NOATIME ;
else
inode - > i_flags & = ~ S_NOATIME ;
}
/*
* Revalidate the Linux inode from the vnode .
*/
int
2006-03-14 13:33:36 +11:00
__vn_revalidate (
2006-06-09 17:00:52 +10:00
bhv_vnode_t * vp ,
2006-06-09 17:07:12 +10:00
bhv_vattr_t * vattr )
2005-04-16 15:20:36 -07:00
{
int error ;
2006-03-14 13:33:36 +11:00
vn_trace_entry ( vp , __FUNCTION__ , ( inst_t * ) __return_address ) ;
vattr - > va_mask = XFS_AT_STAT | XFS_AT_XFLAGS ;
2006-06-09 17:00:52 +10:00
error = bhv_vop_getattr ( vp , vattr , 0 , NULL ) ;
2006-03-14 13:33:36 +11:00
if ( likely ( ! error ) ) {
vn_revalidate_core ( vp , vattr ) ;
2005-04-16 15:20:36 -07:00
VUNMODIFY ( vp ) ;
}
return - error ;
}
2006-03-14 13:33:36 +11:00
int
vn_revalidate (
2006-06-09 17:00:52 +10:00
bhv_vnode_t * vp )
2006-03-14 13:33:36 +11:00
{
2006-06-09 17:07:12 +10:00
bhv_vattr_t vattr ;
2006-03-14 13:33:36 +11:00
return __vn_revalidate ( vp , & vattr ) ;
}
2005-04-16 15:20:36 -07:00
/*
* Add a reference to a referenced vnode .
*/
2006-06-09 17:00:52 +10:00
bhv_vnode_t *
2005-04-16 15:20:36 -07:00
vn_hold (
2006-06-09 17:00:52 +10:00
bhv_vnode_t * vp )
2005-04-16 15:20:36 -07:00
{
struct inode * inode ;
XFS_STATS_INC ( vn_hold ) ;
VN_LOCK ( vp ) ;
2006-03-17 17:25:36 +11:00
inode = igrab ( vn_to_inode ( vp ) ) ;
2005-04-16 15:20:36 -07:00
ASSERT ( inode ) ;
VN_UNLOCK ( vp , 0 ) ;
return vp ;
}
#ifdef	XFS_VNODE_TRACE

/*
 * Record one vnode trace event: event tag, caller info, reference count,
 * vnode flags, and execution context, padded to the ktrace record size.
 */
#define KTRACE_ENTER(vp, vk, s, line, ra)			\
	ktrace_enter(	(vp)->v_trace,				\
/*  0 */		(void *)(__psint_t)(vk),		\
/*  1 */		(void *)(s),				\
/*  2 */		(void *)(__psint_t) line,		\
/*  3 */		(void *)(__psint_t)(vn_count(vp)),	\
/*  4 */		(void *)(ra),				\
/*  5 */		(void *)(__psunsigned_t)(vp)->v_flag,	\
/*  6 */		(void *)(__psint_t)current_cpu(),	\
/*  7 */		(void *)(__psint_t)current_pid(),	\
/*  8 */		(void *)__return_address,		\
/*  9 */		NULL, NULL, NULL, NULL, NULL, NULL, NULL)

/*
 * Vnode tracing code.
 */
void
vn_trace_entry(bhv_vnode_t *vp, const char *func, inst_t *ra)
{
	KTRACE_ENTER(vp, VNODE_KTRACE_ENTRY, func, 0, ra);
}

void
vn_trace_exit(bhv_vnode_t *vp, const char *func, inst_t *ra)
{
	KTRACE_ENTER(vp, VNODE_KTRACE_EXIT, func, 0, ra);
}

void
vn_trace_hold(bhv_vnode_t *vp, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(vp, VNODE_KTRACE_HOLD, file, line, ra);
}

void
vn_trace_ref(bhv_vnode_t *vp, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(vp, VNODE_KTRACE_REF, file, line, ra);
}

void
vn_trace_rele(bhv_vnode_t *vp, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(vp, VNODE_KTRACE_RELE, file, line, ra);
}
#endif	/* XFS_VNODE_TRACE */