// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2011 Red Hat, Inc.  All rights reserved.
 */
# include <linux/slab.h>
# include <linux/spinlock.h>
# include <linux/completion.h>
# include <linux/buffer_head.h>
# include <linux/namei.h>
# include <linux/mm.h>
2017-02-02 19:54:15 +03:00
# include <linux/cred.h>
2006-01-16 19:50:04 +03:00
# include <linux/xattr.h>
# include <linux/posix_acl.h>
2006-02-28 01:23:27 +03:00
# include <linux/gfs2_ondisk.h>
2006-03-28 23:14:04 +04:00
# include <linux/crc32.h>
2017-02-16 23:13:54 +03:00
# include <linux/iomap.h>
2011-05-09 17:06:38 +04:00
# include <linux/security.h>
2020-05-23 10:30:11 +03:00
# include <linux/fiemap.h>
2016-12-24 22:46:01 +03:00
# include <linux/uaccess.h>
2006-01-16 19:50:04 +03:00
# include "gfs2.h"
2006-02-28 01:23:27 +03:00
# include "incore.h"
2006-01-16 19:50:04 +03:00
# include "acl.h"
# include "bmap.h"
# include "dir.h"
2009-08-26 21:51:04 +04:00
# include "xattr.h"
2006-01-16 19:50:04 +03:00
# include "glock.h"
# include "inode.h"
# include "meta_io.h"
# include "quota.h"
# include "rgrp.h"
# include "trans.h"
2006-02-28 01:23:27 +03:00
# include "util.h"
2008-10-14 19:05:55 +04:00
# include "super.h"
2011-05-09 17:06:38 +04:00
# include "glops.h"
2020-11-25 23:14:15 +03:00
/* Forward declarations; the tables themselves are defined later in this
 * file and selected in gfs2_set_iop() based on the inode's file type. */
static const struct inode_operations gfs2_file_iops;
static const struct inode_operations gfs2_dir_iops;
static const struct inode_operations gfs2_symlink_iops;
2011-05-09 17:06:38 +04:00
/**
* gfs2_set_iop - Sets inode operations
* @ inode : The inode with correct i_mode filled in
*
* GFS2 lookup code fills in vfs inode contents based on info obtained
* from directory entry inside gfs2_inode_lookup ( ) .
*/
static void gfs2_set_iop ( struct inode * inode )
{
struct gfs2_sbd * sdp = GFS2_SB ( inode ) ;
umode_t mode = inode - > i_mode ;
if ( S_ISREG ( mode ) ) {
inode - > i_op = & gfs2_file_iops ;
if ( gfs2_localflocks ( sdp ) )
inode - > i_fop = & gfs2_file_fops_nolock ;
else
inode - > i_fop = & gfs2_file_fops ;
} else if ( S_ISDIR ( mode ) ) {
inode - > i_op = & gfs2_dir_iops ;
if ( gfs2_localflocks ( sdp ) )
inode - > i_fop = & gfs2_dir_fops_nolock ;
else
inode - > i_fop = & gfs2_dir_fops ;
} else if ( S_ISLNK ( mode ) ) {
inode - > i_op = & gfs2_symlink_iops ;
} else {
inode - > i_op = & gfs2_file_iops ;
init_special_inode ( inode , inode - > i_mode , inode - > i_rdev ) ;
}
}
2021-11-29 12:50:41 +03:00
/*
 * iget_test - icache match callback for iget5_locked()
 * @opaque points at the u64 on-disk block address (no_addr) being
 * looked up; an inode matches when its GFS2 i_no_addr equals it.
 */
static int iget_test(struct inode *inode, void *opaque)
{
	u64 no_addr = *(u64 *)opaque;

	return GFS2_I(inode)->i_no_addr == no_addr;
}
/*
 * iget_set - icache init callback for iget5_locked()
 * Records the disk block address as both the GFS2-private i_no_addr
 * and the VFS inode number.  Always succeeds (returns 0).
 */
static int iget_set(struct inode *inode, void *opaque)
{
	u64 no_addr = *(u64 *)opaque;

	GFS2_I(inode)->i_no_addr = no_addr;
	inode->i_ino = no_addr;
	return 0;
}
2011-05-09 17:06:38 +04:00
/**
 * gfs2_inode_lookup - Lookup an inode
 * @sb: The super block
 * @type: The type of the inode
 * @no_addr: The inode number
 * @no_formal_ino: The inode generation number
 * @blktype: Requested block type (GFS2_BLKST_DINODE or GFS2_BLKST_UNLINKED;
 *           GFS2_BLKST_FREE to indicate not to verify)
 *
 * If @type is DT_UNKNOWN, the inode type is fetched from disk.
 *
 * If @blktype is anything other than GFS2_BLKST_FREE (which is used as a
 * placeholder because it doesn't otherwise make sense), the on-disk block type
 * is verified to be @blktype.
 *
 * When @no_formal_ino is non-zero, this function will return ERR_PTR(-ESTALE)
 * if it detects that @no_formal_ino doesn't match the actual inode generation
 * number.  However, it doesn't always know unless @type is DT_UNKNOWN.
 *
 * Returns: A VFS inode, or an error
 */
struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
				u64 no_addr, u64 no_formal_ino,
				unsigned int blktype)
{
	struct inode *inode;
	struct gfs2_inode *ip;
	struct gfs2_holder i_gh;
	int error;

	gfs2_holder_mark_uninitialized(&i_gh);
	inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	ip = GFS2_I(inode);

	if (inode->i_state & I_NEW) {
		struct gfs2_sbd *sdp = GFS2_SB(inode);
		struct gfs2_glock *io_gl;
		int extra_flags = 0;

		/* Acquire the inode glock first, then the iopen glock. */
		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE,
				       &ip->i_gl);
		if (unlikely(error))
			goto fail;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE,
				       &io_gl);
		if (unlikely(error))
			goto fail;

		/*
		 * For GFS2_BLKST_UNLINKED lookups, take the iopen glock with
		 * LM_FLAG_TRY so we don't block (a TRY failure is turned into
		 * -EAGAIN at the fail: label); otherwise cancel any pending
		 * delete work on this glock before queueing.
		 */
		if (blktype == GFS2_BLKST_UNLINKED)
			extra_flags |= LM_FLAG_TRY;
		else
			gfs2_cancel_delete_work(io_gl);
		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED,
					   GL_EXACT | GL_NOPID | extra_flags,
					   &ip->i_iopen_gh);
		/* The holder keeps its own reference on the glock. */
		gfs2_glock_put(io_gl);
		if (unlikely(error))
			goto fail;

		if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
			/*
			 * The GL_SKIP flag indicates to skip reading the inode
			 * block.  We read the inode when instantiating it
			 * after possibly checking the block type.
			 */
			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
						   GL_SKIP, &i_gh);
			if (error)
				goto fail;

			/* Bail out early if this generation was deleted. */
			error = -ESTALE;
			if (no_formal_ino &&
			    gfs2_inode_already_deleted(ip->i_gl, no_formal_ino))
				goto fail;

			if (blktype != GFS2_BLKST_FREE) {
				error = gfs2_check_blk_type(sdp, no_addr,
							    blktype);
				if (error)
					goto fail;
			}
		}

		/* The dinode still needs to be read in (instantiated). */
		set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);

		/* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
		inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
		inode->i_atime.tv_nsec = 0;

		glock_set_object(ip->i_gl, ip);

		if (type == DT_UNKNOWN) {
			/* Inode glock must be locked already */
			error = gfs2_instantiate(&i_gh);
			if (error) {
				/* Undo the pairing set just above. */
				glock_clear_object(ip->i_gl, ip);
				goto fail;
			}
		} else {
			ip->i_no_formal_ino = no_formal_ino;
			inode->i_mode = DT2IF(type);
		}

		if (gfs2_holder_initialized(&i_gh))
			gfs2_glock_dq_uninit(&i_gh);
		glock_set_object(ip->i_iopen_gh.gh_gl, ip);

		gfs2_set_iop(inode);
		unlock_new_inode(inode);
	}

	/*
	 * Generation check for inodes found already in cache (and for the
	 * DT_UNKNOWN path above, where i_no_formal_ino was read from disk).
	 */
	if (no_formal_ino && ip->i_no_formal_ino &&
	    no_formal_ino != ip->i_no_formal_ino) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;

fail:
	if (error == GLR_TRYFAILED)
		error = -EAGAIN;
	if (gfs2_holder_initialized(&ip->i_iopen_gh))
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
	if (gfs2_holder_initialized(&i_gh))
		gfs2_glock_dq_uninit(&i_gh);
	/* Marks the inode bad and unhashes it so waiters can retry. */
	iget_failed(inode);
	return ERR_PTR(error);
}
2020-01-15 07:31:38 +03:00
/**
* gfs2_lookup_by_inum - look up an inode by inode number
* @ sdp : The super block
* @ no_addr : The inode number
* @ no_formal_ino : The inode generation number ( 0 for any )
* @ blktype : Requested block type ( see gfs2_inode_lookup )
*/
2011-05-09 17:06:38 +04:00
struct inode * gfs2_lookup_by_inum ( struct gfs2_sbd * sdp , u64 no_addr ,
2020-01-15 07:31:38 +03:00
u64 no_formal_ino , unsigned int blktype )
2011-05-09 17:06:38 +04:00
{
struct super_block * sb = sdp - > sd_vfs ;
gfs2: Fix gfs2_lookup_by_inum lock inversion
The current gfs2_lookup_by_inum takes the glock of a presumed inode
identified by block number, verifies that the block is indeed an inode,
and then instantiates and reads the new inode via gfs2_inode_lookup.
However, instantiating a new inode may block on freeing a previous
instance of that inode (__wait_on_freeing_inode), and freeing an inode
requires to take the glock already held, leading to lock inversion and
deadlock.
Fix this by first instantiating the new inode, then verifying that the
block is an inode (if required), and then reading in the new inode, all
in gfs2_inode_lookup.
If the block we are looking for is not an inode, we discard the new
inode via iget_failed, which marks inodes as bad and unhashes them.
Other tasks waiting on that inode will get back a bad inode back from
ilookup or iget_locked; in that case, retry the lookup.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
2016-06-14 20:22:27 +03:00
struct inode * inode ;
2011-05-09 17:06:38 +04:00
int error ;
2020-01-15 08:21:42 +03:00
inode = gfs2_inode_lookup ( sb , DT_UNKNOWN , no_addr , no_formal_ino ,
blktype ) ;
2011-05-09 17:06:38 +04:00
if ( IS_ERR ( inode ) )
gfs2: Fix gfs2_lookup_by_inum lock inversion
The current gfs2_lookup_by_inum takes the glock of a presumed inode
identified by block number, verifies that the block is indeed an inode,
and then instantiates and reads the new inode via gfs2_inode_lookup.
However, instantiating a new inode may block on freeing a previous
instance of that inode (__wait_on_freeing_inode), and freeing an inode
requires to take the glock already held, leading to lock inversion and
deadlock.
Fix this by first instantiating the new inode, then verifying that the
block is an inode (if required), and then reading in the new inode, all
in gfs2_inode_lookup.
If the block we are looking for is not an inode, we discard the new
inode via iget_failed, which marks inodes as bad and unhashes them.
Other tasks waiting on that inode will get back a bad inode back from
ilookup or iget_locked; in that case, retry the lookup.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
2016-06-14 20:22:27 +03:00
return inode ;
2011-05-09 17:06:38 +04:00
if ( no_formal_ino ) {
error = - EIO ;
if ( GFS2_I ( inode ) - > i_diskflags & GFS2_DIF_SYSTEM )
goto fail_iput ;
}
gfs2: Fix gfs2_lookup_by_inum lock inversion
The current gfs2_lookup_by_inum takes the glock of a presumed inode
identified by block number, verifies that the block is indeed an inode,
and then instantiates and reads the new inode via gfs2_inode_lookup.
However, instantiating a new inode may block on freeing a previous
instance of that inode (__wait_on_freeing_inode), and freeing an inode
requires to take the glock already held, leading to lock inversion and
deadlock.
Fix this by first instantiating the new inode, then verifying that the
block is an inode (if required), and then reading in the new inode, all
in gfs2_inode_lookup.
If the block we are looking for is not an inode, we discard the new
inode via iget_failed, which marks inodes as bad and unhashes them.
Other tasks waiting on that inode will get back a bad inode back from
ilookup or iget_locked; in that case, retry the lookup.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
2016-06-14 20:22:27 +03:00
return inode ;
2011-05-09 17:06:38 +04:00
fail_iput :
iput ( inode ) ;
gfs2: Fix gfs2_lookup_by_inum lock inversion
The current gfs2_lookup_by_inum takes the glock of a presumed inode
identified by block number, verifies that the block is indeed an inode,
and then instantiates and reads the new inode via gfs2_inode_lookup.
However, instantiating a new inode may block on freeing a previous
instance of that inode (__wait_on_freeing_inode), and freeing an inode
requires to take the glock already held, leading to lock inversion and
deadlock.
Fix this by first instantiating the new inode, then verifying that the
block is an inode (if required), and then reading in the new inode, all
in gfs2_inode_lookup.
If the block we are looking for is not an inode, we discard the new
inode via iget_failed, which marks inodes as bad and unhashes them.
Other tasks waiting on that inode will get back a bad inode back from
ilookup or iget_locked; in that case, retry the lookup.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
2016-06-14 20:22:27 +03:00
return ERR_PTR ( error ) ;
2011-05-09 17:06:38 +04:00
}
/*
 * gfs2_lookup_simple - look up @name in directory @dip, mapping the
 * "not found" result to -ENOENT.
 *
 * gfs2_lookupi has inconsistent callers: vfs related routines expect
 * NULL for no entry found, gfs2_lookup_simple callers expect ENOENT
 * and do not check for NULL.
 */
struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
{
	struct inode *inode;
	struct qstr qstr;

	gfs2_str2qstr(&qstr, name);
	inode = gfs2_lookupi(dip, &qstr, 1);
	if (!inode)
		return ERR_PTR(-ENOENT);
	return inode;
}
/**
 * gfs2_lookupi - Look up a filename in a directory and return its inode
 * @dir: The inode of the directory containing the inode to look-up
 * @name: The name of the inode to look for
 * @is_root: If 1, ignore the caller's permissions
 *
 * This can be called via the VFS filldir function when NFS is doing
 * a readdirplus and the inode which its intending to stat isn't
 * already in cache. In this case we must not take the directory glock
 * again, since the readdir call will have already taken that lock.
 *
 * Returns: The inode, NULL if the entry does not exist, or an ERR_PTR
 */
struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
			   int is_root)
{
	struct super_block *sb = dir->i_sb;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error = 0;
	struct inode *inode = NULL;

	gfs2_holder_mark_uninitialized(&d_gh);

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	/* "." (and ".." at the fs root) resolve to @dir itself, no I/O. */
	if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
	    (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
	     dir == d_inode(sb->s_root))) {
		igrab(dir);
		return dir;
	}

	/* Only take the directory glock if this task doesn't already hold
	 * it (see the readdirplus note above). */
	if (gfs2_glock_is_locked_by_me(dip->i_gl) == NULL) {
		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
		if (error)
			return ERR_PTR(error);
	}

	if (!is_root) {
		error = gfs2_permission(&init_user_ns, dir, MAY_EXEC);
		if (error)
			goto out;
	}

	inode = gfs2_dir_search(dir, name, false);
	if (IS_ERR(inode))
		error = PTR_ERR(inode);
out:
	/* Only drop the glock if we took it ourselves above. */
	if (gfs2_holder_initialized(&d_gh))
		gfs2_glock_dq_uninit(&d_gh);
	if (error == -ENOENT)
		return NULL;
	return inode ? inode : ERR_PTR(error);
}
/**
 * create_ok - OK to create a new on-disk inode here?
 * @dip:  Directory in which dinode is to be created
 * @name:  Name of new dinode (currently unused here; checked by callers)
 * @mode: File mode of the new inode (only S_ISDIR matters below)
 *
 * Returns: errno
 */
static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
		     umode_t mode)
{
	int error;

	error = gfs2_permission(&init_user_ns, &dip->i_inode,
				MAY_WRITE | MAY_EXEC);
	if (error)
		return error;

	/* Don't create entries in an unlinked directory */
	if (!dip->i_inode.i_nlink)
		return -ENOENT;

	/* Entry count is a u32; refuse once the counter would saturate. */
	if (dip->i_entries == (u32)-1)
		return -EFBIG;
	/* A new subdir adds a ".." back-link; refuse at the nlink limit. */
	if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
		return -EMLINK;

	return 0;
}
2012-10-31 14:30:22 +04:00
/*
 * munge_mode_uid_gid - choose owner, group, and inherited mode bits for
 * a new inode created in directory @dip.
 *
 * With the "suiddir" mount option, a setuid directory owned by a
 * non-root user passes its owner on to new children: subdirectories
 * also inherit the setuid bit, while files created by a different user
 * lose all execute/setid bits (~07111).  Otherwise the owner is the
 * current fsuid.  Setgid directories propagate their group (and, for
 * subdirectories, the setgid bit) as usual; otherwise the group is the
 * current fsgid.
 */
static void munge_mode_uid_gid(const struct gfs2_inode *dip,
			       struct inode *inode)
{
	if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
	    (dip->i_inode.i_mode & S_ISUID) &&
	    !uid_eq(dip->i_inode.i_uid, GLOBAL_ROOT_UID)) {
		if (S_ISDIR(inode->i_mode))
			inode->i_mode |= S_ISUID;
		else if (!uid_eq(dip->i_inode.i_uid, current_fsuid()))
			inode->i_mode &= ~07111;
		inode->i_uid = dip->i_inode.i_uid;
	} else
		inode->i_uid = current_fsuid();

	if (dip->i_inode.i_mode & S_ISGID) {
		if (S_ISDIR(inode->i_mode))
			inode->i_mode |= S_ISGID;
		inode->i_gid = dip->i_inode.i_gid;
	} else
		inode->i_gid = current_fsgid();
}
2014-02-04 19:45:11 +04:00
/**
 * alloc_dinode - Allocate the on-disk block(s) backing a new dinode
 * @ip: The (in-core) inode to allocate for
 * @flags: Allocation flags passed through as ap.aflags (e.g. GFS2_AF_ORLOV)
 * @dblocks: In: number of blocks wanted; out: number actually allocated
 *
 * Takes the quota lock, reserves space in a resource group, opens a
 * transaction and allocates the blocks.  On success, ip->i_no_addr,
 * i_no_formal_ino, i_inode.i_ino and i_goal are all derived from the
 * new allocation.  Resources are released in reverse order via the
 * goto ladder regardless of success or failure.
 *
 * Returns: errno
 */
static int alloc_dinode(struct gfs2_inode *ip, u32 flags, unsigned *dblocks)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc_parms ap = { .target = *dblocks, .aflags = flags, };
	int error;

	error = gfs2_quota_lock_check(ip, &ap);
	if (error)
		goto out;

	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_quota;

	/* One RES_RG_BIT per block, plus statfs and quota change records */
	error = gfs2_trans_begin(sdp, (*dblocks * RES_RG_BIT) + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipreserv;

	error = gfs2_alloc_blocks(ip, &ip->i_no_addr, dblocks, 1, &ip->i_generation);
	/* The formal inode number is taken from the generation counter */
	ip->i_no_formal_ino = ip->i_generation;
	ip->i_inode.i_ino = ip->i_no_addr;
	ip->i_goal = ip->i_no_addr;

	gfs2_trans_end(sdp);

out_ipreserv:
	gfs2_inplace_release(ip);
out_quota:
	gfs2_quota_unlock(ip);
out:
	return error;
}
2011-05-13 15:11:17 +04:00
/**
 * gfs2_init_dir - Initialise a stuffed directory block for a new directory
 * @dibh: The buffer holding the new dinode
 * @parent: The parent directory (target of the ".." entry)
 *
 * Writes the "." and ".." entries directly after the dinode header.
 * "." points at the new inode itself (di_num is copied as-is since it
 * is already in on-disk endianness); ".." points at @parent and its
 * dirent record is sized to consume the remainder of the block.
 */
static void gfs2_init_dir(struct buffer_head *dibh,
			  const struct gfs2_inode *parent)
{
	struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;
	struct gfs2_dirent *dent = (struct gfs2_dirent *)(di + 1);

	gfs2_qstr2dirent(&gfs2_qdot, GFS2_DIRENT_SIZE(gfs2_qdot.len), dent);
	dent->de_inum = di->di_num; /* already GFS2 endian */
	dent->de_type = cpu_to_be16(DT_DIR);

	dent = (struct gfs2_dirent *)((char *)dent + GFS2_DIRENT_SIZE(1));
	gfs2_qstr2dirent(&gfs2_qdotdot, dibh->b_size - GFS2_DIRENT_SIZE(1) - sizeof(struct gfs2_dinode), dent);
	gfs2_inum_out(parent, dent);
	dent->de_type = cpu_to_be16(DT_DIR);
}
2014-02-04 19:45:11 +04:00
/**
 * gfs2_init_xattr - Initialise an xattr block for a new inode
 * @ip: The inode in question
 *
 * This sets up an empty xattr block for a new inode, ready to
 * take any ACLs, LSM xattrs, etc.  The block at ip->i_eattr is
 * stamped with the EA metatype, added to the current transaction,
 * and given a single unused record spanning the usable space.
 */
static void gfs2_init_xattr(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh;
	struct gfs2_ea_header *ea;

	bh = gfs2_meta_new(ip->i_gl, ip->i_eattr);
	gfs2_trans_add_meta(ip->i_gl, bh);
	gfs2_metatype_set(bh, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
	gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));

	/* One unused record covering the whole journaled block size */
	ea = GFS2_EA_BH2FIRST(bh);
	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
	ea->ea_type = GFS2_EATYPE_UNUSED;
	ea->ea_flags = GFS2_EAFLAG_LAST;

	brelse(bh);
}
2011-05-09 17:06:38 +04:00
/**
 * init_dinode - Fill in a new dinode structure
 * @dip: The directory this inode is being created in
 * @ip: The inode
 * @symname: The symlink destination (if a symlink)
 *
 * Writes the initial on-disk representation of @ip into a freshly
 * allocated metadata buffer: the common dinode fields, device numbers,
 * zeroed padding/reserved areas, and then the type-specific payload
 * ("."/".." entries for a directory, the target string for a symlink).
 * Must be called inside a transaction.
 */
static void init_dinode(struct gfs2_inode *dip, struct gfs2_inode *ip,
			const char *symname)
{
	struct gfs2_dinode *di;
	struct buffer_head *dibh;

	dibh = gfs2_meta_new(ip->i_gl, ip->i_no_addr);
	gfs2_trans_add_meta(ip->i_gl, dibh);
	di = (struct gfs2_dinode *)dibh->b_data;
	gfs2_dinode_out(ip, di);

	di->di_major = cpu_to_be32(imajor(&ip->i_inode));
	di->di_minor = cpu_to_be32(iminor(&ip->i_inode));
	di->__pad1 = 0;
	di->__pad2 = 0;
	di->__pad3 = 0;
	memset(&di->__pad4, 0, sizeof(di->__pad4));
	memset(&di->di_reserved, 0, sizeof(di->di_reserved));
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFDIR:
		gfs2_init_dir(dibh, dip);
		break;
	case S_IFLNK:
		/* Symlink target is stuffed directly after the dinode header */
		memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname, ip->i_inode.i_size);
		break;
	}

	set_buffer_uptodate(dibh);
	brelse(dibh);
}
2014-01-06 16:03:05 +04:00
/**
2021-03-30 19:44:29 +03:00
* gfs2_trans_da_blks - Calculate number of blocks to link inode
2014-01-06 16:03:05 +04:00
* @ dip : The directory we are linking into
* @ da : The dir add information
* @ nr_inodes : The number of inodes involved
*
* This calculate the number of blocks we need to reserve in a
* transaction to link @ nr_inodes into a directory . In most cases
* @ nr_inodes will be 2 ( the directory plus the inode being linked in )
* but in case of rename , 4 may be required .
*
* Returns : Number of blocks
*/
static unsigned gfs2_trans_da_blks ( const struct gfs2_inode * dip ,
const struct gfs2_diradd * da ,
unsigned nr_inodes )
{
return da - > nr_blocks + gfs2_rg_blocks ( dip , da - > nr_blocks ) +
( nr_inodes * RES_DINODE ) + RES_QUOTA + RES_STATFS ;
}
2011-05-09 17:06:38 +04:00
/**
 * link_dinode - Add a directory entry pointing at a new inode
 * @dip: The directory to add the entry to
 * @name: The name of the new entry
 * @ip: The inode the entry will point at
 * @da: Dir-add info from gfs2_diradd_alloc_required()
 *
 * If the directory needs to grow (da->nr_blocks != 0), quota and
 * resource group reservations are taken before opening a suitably
 * sized transaction; otherwise a small fixed-size transaction is used.
 * Cleanup falls through the label ladder in reverse acquisition order;
 * note gfs2_inplace_release()/gfs2_quota_unlock() run unconditionally
 * on those paths.
 *
 * Returns: errno
 */
static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
		       struct gfs2_inode *ip, struct gfs2_diradd *da)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_alloc_parms ap = { .target = da->nr_blocks, };
	int error;

	if (da->nr_blocks) {
		error = gfs2_quota_lock_check(dip, &ap);
		if (error)
			goto fail_quota_locks;

		error = gfs2_inplace_reserve(dip, &ap);
		if (error)
			goto fail_quota_locks;

		error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(dip, da, 2), 0);
		if (error)
			goto fail_ipreserv;
	} else {
		/* No directory growth needed: leaf update + two dinodes */
		error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
		if (error)
			goto fail_quota_locks;
	}

	error = gfs2_dir_add(&dip->i_inode, name, ip, da);

	gfs2_trans_end(sdp);
fail_ipreserv:
	gfs2_inplace_release(dip);
fail_quota_locks:
	gfs2_quota_unlock(dip);
	return error;
}
2011-09-24 02:51:32 +04:00
static int gfs2_initxattrs ( struct inode * inode , const struct xattr * xattr_array ,
2011-06-06 23:29:25 +04:00
void * fs_info )
2011-05-09 17:06:38 +04:00
{
2011-06-06 23:29:25 +04:00
const struct xattr * xattr ;
int err = 0 ;
for ( xattr = xattr_array ; xattr - > name ! = NULL ; xattr + + ) {
err = __gfs2_xattr_set ( inode , xattr - > name , xattr - > value ,
xattr - > value_len , 0 ,
GFS2_EATYPE_SECURITY ) ;
if ( err < 0 )
break ;
2011-05-09 17:06:38 +04:00
}
return err ;
}
2006-01-16 19:50:04 +03:00
2011-05-09 17:06:38 +04:00
/**
 * gfs2_create_inode - Create a new inode
 * @dir: The parent directory
 * @dentry: The new dentry
 * @file: If non-NULL, the file which is being opened
 * @mode: The permissions on the new inode
 * @dev: For device nodes, this is the device number
 * @symname: For symlinks, this is the link destination
 * @size: The initial size of the inode (ignored for directories)
 * @excl: Force fail if inode exists
 *
 * Core of create/mkdir/symlink/mknod and atomic-open creation.  Takes
 * the parent's glock exclusively, re-checks for an existing entry (and
 * finishes the open on it if one is found), then allocates and fully
 * initialises a new dinode: quota, resource group, glocks (inode +
 * iopen), on-disk dinode, ACLs, LSM xattrs, and finally the directory
 * entry linking it in.  The error-unwind ladder at the bottom mirrors
 * the acquisition order exactly.
 *
 * Returns: 0 on success, or error code
 */

static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
			     struct file *file,
			     umode_t mode, dev_t dev, const char *symname,
			     unsigned int size, int excl)
{
	const struct qstr *name = &dentry->d_name;
	struct posix_acl *default_acl, *acl;
	struct gfs2_holder ghs[2];
	struct inode *inode = NULL;
	struct gfs2_inode *dip = GFS2_I(dir), *ip;
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_glock *io_gl;
	int error, free_vfs_inode = 1;
	u32 aflags = 0;
	unsigned blocks = 1;
	struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, };

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return -ENAMETOOLONG;

	error = gfs2_qa_get(dip);
	if (error)
		return error;

	error = gfs2_rindex_update(sdp);
	if (error)
		goto fail;

	/* Exclusive lock on the parent directory for the whole operation */
	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
	if (error)
		goto fail;
	gfs2_holder_mark_uninitialized(ghs + 1);

	error = create_ok(dip, name, mode);
	if (error)
		goto fail_gunlock;

	/* Re-check for an existing entry now that the lock is held */
	inode = gfs2_dir_search(dir, &dentry->d_name, !S_ISREG(mode) || excl);
	error = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		if (S_ISDIR(inode->i_mode)) {
			iput(inode);
			inode = ERR_PTR(-EISDIR);
			goto fail_gunlock;
		}
		/* Entry already exists: hand it to the caller/opener */
		d_instantiate(dentry, inode);
		error = 0;
		if (file) {
			if (S_ISREG(inode->i_mode))
				error = finish_open(file, dentry, gfs2_open_common);
			else
				error = finish_no_open(file, NULL);
		}
		gfs2_glock_dq_uninit(ghs);
		goto fail;
	} else if (error != -ENOENT) {
		goto fail_gunlock;
	}

	error = gfs2_diradd_alloc_required(dir, name, &da);
	if (error < 0)
		goto fail_gunlock;

	inode = new_inode(sdp->sd_vfs);
	error = -ENOMEM;
	if (!inode)
		goto fail_gunlock;

	/* May modify @mode according to the parent's default ACL */
	error = posix_acl_create(dir, &mode, &default_acl, &acl);
	if (error)
		goto fail_gunlock;

	ip = GFS2_I(inode);
	error = gfs2_qa_get(ip);
	if (error)
		goto fail_free_acls;

	inode->i_mode = mode;
	set_nlink(inode, S_ISDIR(mode) ? 2 : 1);
	inode->i_rdev = dev;
	inode->i_size = size;
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
	munge_mode_uid_gid(dip, inode);
	check_and_update_goal(dip);
	ip->i_goal = dip->i_goal;
	ip->i_diskflags = 0;
	ip->i_eattr = 0;
	ip->i_height = 0;
	ip->i_depth = 0;
	ip->i_entries = 0;
	ip->i_no_addr = 0; /* Temporarily zero until real addr is assigned */

	switch (mode & S_IFMT) {
	case S_IFREG:
		if ((dip->i_diskflags & GFS2_DIF_INHERIT_JDATA) ||
		    gfs2_tune_get(sdp, gt_new_files_jdata))
			ip->i_diskflags |= GFS2_DIF_JDATA;
		gfs2_set_aops(inode);
		break;
	case S_IFDIR:
		ip->i_diskflags |= (dip->i_diskflags & GFS2_DIF_INHERIT_JDATA);
		ip->i_diskflags |= GFS2_DIF_JDATA;
		ip->i_entries = 2;
		break;
	}

	/* Force SYSTEM flag on all files and subdirs of a SYSTEM directory */
	if (dip->i_diskflags & GFS2_DIF_SYSTEM)
		ip->i_diskflags |= GFS2_DIF_SYSTEM;

	gfs2_set_inode_flags(inode);

	if ((GFS2_I(d_inode(sdp->sd_root_dir)) == dip) ||
	    (dip->i_diskflags & GFS2_DIF_TOPDIR))
		aflags |= GFS2_AF_ORLOV;

	/* An extra block for the xattr block, if any ACLs need storing */
	if (default_acl || acl)
		blocks++;

	error = alloc_dinode(ip, aflags, &blocks);
	if (error)
		goto fail_free_inode;

	gfs2_set_inode_blocks(inode, blocks);

	error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
	if (error)
		goto fail_free_inode;

	error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
	if (error)
		goto fail_free_inode;
	gfs2_cancel_delete_work(io_gl);

	error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr);
	BUG_ON(error);

	error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT | GL_NOPID,
				   &ip->i_iopen_gh);
	if (error)
		goto fail_gunlock2;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
	if (error)
		goto fail_gunlock3;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto fail_gunlock3;

	if (blocks > 1) {
		/* The xattr block follows the dinode block on disk */
		ip->i_eattr = ip->i_no_addr + 1;
		gfs2_init_xattr(ip);
	}
	init_dinode(dip, ip, symname);
	gfs2_trans_end(sdp);

	glock_set_object(ip->i_gl, ip);
	glock_set_object(io_gl, ip);
	gfs2_set_iop(inode);

	free_vfs_inode = 0; /* After this point, the inode is no longer
			       considered free. Any failures need to undo
			       the gfs2 structures. */
	if (default_acl) {
		error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
		if (error)
			goto fail_gunlock4;
		posix_acl_release(default_acl);
		default_acl = NULL;
	}
	if (acl) {
		error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
		if (error)
			goto fail_gunlock4;
		posix_acl_release(acl);
		acl = NULL;
	}

	error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
					     &gfs2_initxattrs, NULL);
	if (error)
		goto fail_gunlock4;

	error = link_dinode(dip, name, ip, &da);
	if (error)
		goto fail_gunlock4;

	mark_inode_dirty(inode);
	d_instantiate(dentry, inode);
	/* After instantiate, errors should result in evict which will destroy
	 * both inode and iopen glocks properly. */
	if (file) {
		file->f_mode |= FMODE_CREATED;
		error = finish_open(file, dentry, gfs2_open_common);
	}
	gfs2_glock_dq_uninit(ghs);
	gfs2_qa_put(ip);
	gfs2_glock_dq_uninit(ghs + 1);
	gfs2_glock_put(io_gl);
	gfs2_qa_put(dip);
	unlock_new_inode(inode);
	return error;

fail_gunlock4:
	glock_clear_object(ip->i_gl, ip);
	glock_clear_object(io_gl, ip);
fail_gunlock3:
	gfs2_glock_dq_uninit(&ip->i_iopen_gh);
fail_gunlock2:
	gfs2_glock_put(io_gl);
fail_free_inode:
	if (ip->i_gl) {
		if (free_vfs_inode) /* else evict will do the put for us */
			gfs2_glock_put(ip->i_gl);
	}
	gfs2_rs_deltree(&ip->i_res);
	gfs2_qa_put(ip);
fail_free_acls:
	posix_acl_release(default_acl);
	posix_acl_release(acl);
fail_gunlock:
	gfs2_dir_no_add(&da);
	gfs2_glock_dq_uninit(ghs);
	if (!IS_ERR_OR_NULL(inode)) {
		clear_nlink(inode);
		if (!free_vfs_inode)
			mark_inode_dirty(inode);
		set_bit(free_vfs_inode ? GIF_FREE_VFS_INODE : GIF_ALLOC_FAILED,
			&GFS2_I(inode)->i_flags);
		if (inode->i_state & I_NEW)
			iget_failed(inode);
		else
			iput(inode);
	}
	if (gfs2_holder_initialized(ghs + 1))
		gfs2_glock_dq_uninit(ghs + 1);
fail:
	gfs2_qa_put(dip);
	return error;
}
2011-05-13 15:11:17 +04:00
2006-01-16 19:50:04 +03:00
/**
 * gfs2_create - Create a file
 * @mnt_userns: User namespace of the mount the inode was found from
 * @dir: The directory in which to create the file
 * @dentry: The dentry of the new file
 * @mode: The mode of the new file
 * @excl: Force fail if inode exists
 *
 * Thin wrapper around gfs2_create_inode() for the S_IFREG case.
 * Note that @mnt_userns is currently unused here.
 *
 * Returns: errno
 */

static int gfs2_create(struct user_namespace *mnt_userns, struct inode *dir,
		       struct dentry *dentry, umode_t mode, bool excl)
{
	return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl);
}
/**
 * __gfs2_lookup - Look up a filename in a directory and return its inode
 * @dir: The directory inode
 * @dentry: The dentry of the new inode
 * @file: File to be opened (may be NULL; used by atomic open)
 *
 * Resolves @dentry in @dir.  A negative lookup adds a NULL dentry and
 * returns NULL.  On success the inode's glock is held shared across
 * d_splice_alias() and (for regular files in an open) finish_open(),
 * then dropped before returning.
 *
 * Returns: the spliced dentry, NULL, or an ERR_PTR
 */

static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
				    struct file *file)
{
	struct inode *inode;
	struct dentry *d;
	struct gfs2_holder gh;
	struct gfs2_glock *gl;
	int error;

	inode = gfs2_lookupi(dir, &dentry->d_name, 0);
	if (inode == NULL) {
		/* Negative lookup: cache the absence */
		d_add(dentry, NULL);
		return NULL;
	}
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	gl = GFS2_I(inode)->i_gl;
	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
	if (error) {
		iput(inode);
		return ERR_PTR(error);
	}

	d = d_splice_alias(inode, dentry);
	if (IS_ERR(d)) {
		gfs2_glock_dq_uninit(&gh);
		return d;
	}
	if (file && S_ISREG(inode->i_mode))
		error = finish_open(file, dentry, gfs2_open_common);

	gfs2_glock_dq_uninit(&gh);
	if (error) {
		dput(d);
		return ERR_PTR(error);
	}
	return d;
}
/* Plain ->lookup entry point: same as __gfs2_lookup but with no file
 * being opened. */
static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned flags)
{
	return __gfs2_lookup(dir, dentry, NULL);
}
/**
 * gfs2_link - Link to a file
 * @old_dentry: The inode to link
 * @dir: Add link to this directory
 * @dentry: The name of the link
 *
 * Link the inode in "old_dentry" into the directory "dir" with the
 * name in "dentry".
 *
 * Takes both the directory's and the inode's glocks exclusively
 * (parent first, then child), validates permissions and limits, and
 * adds the new directory entry inside a transaction, growing the
 * directory first if required.  The goto ladder releases everything
 * in reverse order.
 *
 * Returns: errno
 */

static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_sbd *sdp = GFS2_SB(dir);
	struct inode *inode = d_inode(old_dentry);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder ghs[2];
	struct buffer_head *dibh;
	struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, };
	int error;

	/* Hard links to directories are not allowed */
	if (S_ISDIR(inode->i_mode))
		return -EPERM;

	error = gfs2_qa_get(dip);
	if (error)
		return error;

	gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);

	error = gfs2_glock_nq(ghs); /* parent */
	if (error)
		goto out_parent;

	error = gfs2_glock_nq(ghs + 1); /* child */
	if (error)
		goto out_child;

	/* Can't link an unlinked (nlink == 0) inode back in */
	error = -ENOENT;
	if (inode->i_nlink == 0)
		goto out_gunlock;

	error = gfs2_permission(&init_user_ns, dir, MAY_WRITE | MAY_EXEC);
	if (error)
		goto out_gunlock;

	error = gfs2_dir_check(dir, &dentry->d_name, NULL);
	switch (error) {
	case -ENOENT:
		break;	/* name is free: proceed */
	case 0:
		error = -EEXIST;
		goto out_gunlock;
	default:
		goto out_gunlock;
	}

	error = -EINVAL;
	if (!dip->i_inode.i_nlink)
		goto out_gunlock;
	error = -EFBIG;
	if (dip->i_entries == (u32)-1)
		goto out_gunlock;
	error = -EPERM;
	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		goto out_gunlock;
	error = -EINVAL;
	if (!ip->i_inode.i_nlink)
		goto out_gunlock;
	error = -EMLINK;
	if (ip->i_inode.i_nlink == (u32)-1)
		goto out_gunlock;

	error = gfs2_diradd_alloc_required(dir, &dentry->d_name, &da);
	if (error < 0)
		goto out_gunlock;

	if (da.nr_blocks) {
		/* Directory must grow: reserve quota + rgrp space first */
		struct gfs2_alloc_parms ap = { .target = da.nr_blocks, };
		error = gfs2_quota_lock_check(dip, &ap);
		if (error)
			goto out_gunlock;

		error = gfs2_inplace_reserve(dip, &ap);
		if (error)
			goto out_gunlock_q;

		error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(dip, &da, 2), 0);
		if (error)
			goto out_ipres;
	} else {
		error = gfs2_trans_begin(sdp, 2 * RES_DINODE + RES_LEAF, 0);
		if (error)
			goto out_ipres;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out_end_trans;

	error = gfs2_dir_add(dir, &dentry->d_name, ip, &da);
	if (error)
		goto out_brelse;

	gfs2_trans_add_meta(ip->i_gl, dibh);
	inc_nlink(&ip->i_inode);
	ip->i_inode.i_ctime = current_time(&ip->i_inode);
	ihold(inode);
	d_instantiate(dentry, inode);
	mark_inode_dirty(inode);

out_brelse:
	brelse(dibh);
out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	if (da.nr_blocks)
		gfs2_inplace_release(dip);
out_gunlock_q:
	if (da.nr_blocks)
		gfs2_quota_unlock(dip);
out_gunlock:
	gfs2_dir_no_add(&da);
	gfs2_glock_dq(ghs + 1);
out_child:
	gfs2_glock_dq(ghs);
out_parent:
	gfs2_qa_put(dip);
	gfs2_holder_uninit(ghs);
	gfs2_holder_uninit(ghs + 1);
	return error;
}
2009-05-22 13:54:50 +04:00
/*
* gfs2_unlink_ok - check to see that a inode is still in a directory
* @ dip : the directory
* @ name : the name of the file
* @ ip : the inode
*
* Assumes that the lock on ( at least ) @ dip is held .
*
* Returns : 0 if the parent / child relationship is correct , errno if it isn ' t
*/
static int gfs2_unlink_ok ( struct gfs2_inode * dip , const struct qstr * name ,
const struct gfs2_inode * ip )
{
int error ;
if ( IS_IMMUTABLE ( & ip - > i_inode ) | | IS_APPEND ( & ip - > i_inode ) )
return - EPERM ;
if ( ( dip - > i_inode . i_mode & S_ISVTX ) & &
2013-02-01 09:56:13 +04:00
! uid_eq ( dip - > i_inode . i_uid , current_fsuid ( ) ) & &
! uid_eq ( ip - > i_inode . i_uid , current_fsuid ( ) ) & & ! capable ( CAP_FOWNER ) )
2009-05-22 13:54:50 +04:00
return - EPERM ;
if ( IS_APPEND ( & dip - > i_inode ) )
return - EPERM ;
2021-01-21 16:19:43 +03:00
error = gfs2_permission ( & init_user_ns , & dip - > i_inode ,
MAY_WRITE | MAY_EXEC ) ;
2009-05-22 13:54:50 +04:00
if ( error )
return error ;
2014-10-10 00:49:21 +04:00
return gfs2_dir_check ( & dip - > i_inode , name , ip ) ;
2009-05-22 13:54:50 +04:00
}
2006-01-16 19:50:04 +03:00
/**
 * gfs2_unlink_inode - Removes an inode from its parent dir and unlinks it
 * @dip: The parent directory
 * @dentry: The dentry to unlink
 *
 * Called with all the locks and in a transaction. This will only be
 * called for a directory after it has been checked to ensure it is empty.
 *
 * Deletes the directory entry, drops the link count (directories go
 * straight to zero since "." and ".." are implicit), and marks the
 * dinode unlinked on disk once no links remain.
 *
 * Returns: 0 on success, or an error
 */
static int gfs2_unlink_inode(struct gfs2_inode *dip,
			     const struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	struct gfs2_inode *ip = GFS2_I(inode);
	int error;

	error = gfs2_dir_del(dip, dentry);
	if (error)
		return error;

	ip->i_entries = 0;
	inode->i_ctime = current_time(inode);
	if (S_ISDIR(inode->i_mode))
		clear_nlink(inode);
	else
		drop_nlink(inode);
	mark_inode_dirty(inode);
	if (inode->i_nlink == 0)
		gfs2_unlink_di(inode);
	return 0;
}
/**
 * gfs2_unlink - Unlink an inode (this does rmdir as well)
 * @dir: The inode of the directory containing the inode to unlink
 * @dentry: The file itself
 *
 * This routine uses the type of the inode as a flag to figure out
 * whether this is an unlink or an rmdir.
 *
 * Three glocks are involved, taken in order: parent directory, the
 * inode itself, and the resource group holding the inode's blocks
 * (needed so the dinode can be marked unlinked).  The goto ladder
 * drops and uninitialises them in reverse order.
 *
 * Returns: errno
 */
static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
{
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_sbd *sdp = GFS2_SB(dir);
	struct inode *inode = d_inode(dentry);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder ghs[3];
	struct gfs2_rgrpd *rgd;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	error = -EROFS;	/* returned if the rgrp lookup below fails */

	gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
	if (!rgd)
		goto out_inodes;

	gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE, ghs + 2);

	error = gfs2_glock_nq(ghs); /* parent */
	if (error)
		goto out_parent;

	error = gfs2_glock_nq(ghs + 1); /* child */
	if (error)
		goto out_child;

	error = -ENOENT;
	if (inode->i_nlink == 0)
		goto out_rgrp;

	if (S_ISDIR(inode->i_mode)) {
		/* rmdir: only "." and ".." may remain */
		error = -ENOTEMPTY;
		if (ip->i_entries > 2 || inode->i_nlink > 2)
			goto out_rgrp;
	}

	error = gfs2_glock_nq(ghs + 2); /* rgrp */
	if (error)
		goto out_rgrp;

	error = gfs2_unlink_ok(dip, &dentry->d_name, ip);
	if (error)
		goto out_gunlock;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE + 3 * RES_LEAF + RES_RG_BIT, 0);
	if (error)
		goto out_gunlock;

	error = gfs2_unlink_inode(dip, dentry);
	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq(ghs + 2);
out_rgrp:
	gfs2_glock_dq(ghs + 1);
out_child:
	gfs2_glock_dq(ghs);
out_parent:
	gfs2_holder_uninit(ghs + 2);
out_inodes:
	gfs2_holder_uninit(ghs + 1);
	gfs2_holder_uninit(ghs);
	return error;
}
/**
* gfs2_symlink - Create a symlink
2021-03-30 19:44:29 +03:00
* @ mnt_userns : User namespace of the mount the inode was found from
2006-01-16 19:50:04 +03:00
* @ dir : The directory to create the symlink in
* @ dentry : The dentry to put the symlink in
* @ symname : The thing which the link points to
*
* Returns : errno
*/
2021-01-21 16:19:43 +03:00
static int gfs2_symlink ( struct user_namespace * mnt_userns , struct inode * dir ,
struct dentry * dentry , const char * symname )
2006-01-16 19:50:04 +03:00
{
2011-05-13 13:34:59 +04:00
unsigned int size ;
2006-01-16 19:50:04 +03:00
size = strlen ( symname ) ;
2017-11-14 18:53:12 +03:00
if ( size > = gfs2_max_stuffed_size ( GFS2_I ( dir ) ) )
2006-01-16 19:50:04 +03:00
return - ENAMETOOLONG ;
2018-06-08 20:06:28 +03:00
return gfs2_create_inode ( dir , dentry , NULL , S_IFLNK | S_IRWXUGO , 0 , symname , size , 0 ) ;
2006-01-16 19:50:04 +03:00
}
/**
* gfs2_mkdir - Make a directory
2021-03-30 19:44:29 +03:00
* @ mnt_userns : User namespace of the mount the inode was found from
2006-01-16 19:50:04 +03:00
* @ dir : The parent directory of the new one
* @ dentry : The dentry of the new directory
* @ mode : The mode of the new directory
*
* Returns : errno
*/
2021-01-21 16:19:43 +03:00
static int gfs2_mkdir ( struct user_namespace * mnt_userns , struct inode * dir ,
struct dentry * dentry , umode_t mode )
2006-01-16 19:50:04 +03:00
{
2017-11-14 18:53:12 +03:00
unsigned dsize = gfs2_max_stuffed_size ( GFS2_I ( dir ) ) ;
2018-06-08 20:06:28 +03:00
return gfs2_create_inode ( dir , dentry , NULL , S_IFDIR | mode , 0 , NULL , dsize , 0 ) ;
2006-01-16 19:50:04 +03:00
}
/**
* gfs2_mknod - Make a special file
2021-03-30 19:44:29 +03:00
* @ mnt_userns : User namespace of the mount the inode was found from
2006-01-16 19:50:04 +03:00
* @ dir : The directory in which the special file will reside
* @ dentry : The dentry of the special file
* @ mode : The mode of the special file
2011-05-13 15:11:17 +04:00
* @ dev : The device specification of the special file
2006-01-16 19:50:04 +03:00
*
*/
2021-01-21 16:19:43 +03:00
static int gfs2_mknod ( struct user_namespace * mnt_userns , struct inode * dir ,
struct dentry * dentry , umode_t mode , dev_t dev )
2006-01-16 19:50:04 +03:00
{
2018-06-08 20:06:28 +03:00
return gfs2_create_inode ( dir , dentry , NULL , mode , dev , NULL , 0 , 0 ) ;
2013-06-14 14:17:15 +04:00
}
/**
* gfs2_atomic_open - Atomically open a file
* @ dir : The directory
* @ dentry : The proposed new entry
* @ file : The proposed new struct file
* @ flags : open flags
* @ mode : File mode
*
* Returns : error code or 0 for success
*/
static int gfs2_atomic_open ( struct inode * dir , struct dentry * dentry ,
2015-05-01 20:54:38 +03:00
struct file * file , unsigned flags ,
2018-06-08 20:32:02 +03:00
umode_t mode )
2013-06-14 14:17:15 +04:00
{
struct dentry * d ;
bool excl = ! ! ( flags & O_EXCL ) ;
2016-07-05 16:44:53 +03:00
if ( ! d_in_lookup ( dentry ) )
2014-09-13 02:21:05 +04:00
goto skip_lookup ;
2018-06-08 20:06:28 +03:00
d = __gfs2_lookup ( dir , dentry , file ) ;
2013-06-14 14:17:15 +04:00
if ( IS_ERR ( d ) )
return PTR_ERR ( d ) ;
2013-09-23 16:21:04 +04:00
if ( d ! = NULL )
dentry = d ;
2015-03-18 01:25:59 +03:00
if ( d_really_is_positive ( dentry ) ) {
2018-06-08 19:58:04 +03:00
if ( ! ( file - > f_mode & FMODE_OPENED ) )
2014-11-19 22:35:58 +03:00
return finish_no_open ( file , d ) ;
2013-09-23 16:21:04 +04:00
dput ( d ) ;
2020-03-10 16:31:41 +03:00
return excl & & ( flags & O_CREAT ) ? - EEXIST : 0 ;
2013-06-14 14:17:15 +04:00
}
2013-09-23 16:21:04 +04:00
BUG_ON ( d ! = NULL ) ;
2014-09-13 02:21:05 +04:00
skip_lookup :
2013-06-14 14:17:15 +04:00
if ( ! ( flags & O_CREAT ) )
return - ENOENT ;
2018-06-08 20:06:28 +03:00
return gfs2_create_inode ( dir , dentry , file , S_IFREG | mode , 0 , NULL , 0 , excl ) ;
2006-01-16 19:50:04 +03:00
}
2008-08-26 12:38:26 +04:00
/*
* gfs2_ok_to_move - check if it ' s ok to move a directory to another directory
* @ this : move this
* @ to : to here
*
* Follow @ to back to the root and make sure we don ' t encounter @ this
* Assumes we already hold the rename lock .
*
* Returns : errno
*/
static int gfs2_ok_to_move ( struct gfs2_inode * this , struct gfs2_inode * to )
{
struct inode * dir = & to - > i_inode ;
struct super_block * sb = dir - > i_sb ;
struct inode * tmp ;
int error = 0 ;
igrab ( dir ) ;
for ( ; ; ) {
if ( dir = = & this - > i_inode ) {
error = - EINVAL ;
break ;
}
2015-03-18 01:25:59 +03:00
if ( dir = = d_inode ( sb - > s_root ) ) {
2008-08-26 12:38:26 +04:00
error = 0 ;
break ;
}
2010-09-17 15:30:23 +04:00
tmp = gfs2_lookupi ( dir , & gfs2_qdotdot , 1 ) ;
2014-03-12 12:41:44 +04:00
if ( ! tmp ) {
error = - ENOENT ;
break ;
}
2008-08-26 12:38:26 +04:00
if ( IS_ERR ( tmp ) ) {
error = PTR_ERR ( tmp ) ;
break ;
}
iput ( dir ) ;
dir = tmp ;
}
iput ( dir ) ;
return error ;
}
2015-05-05 20:12:19 +03:00
/**
* update_moved_ino - Update an inode that ' s being moved
* @ ip : The inode being moved
* @ ndip : The parent directory of the new filename
* @ dir_rename : True of ip is a directory
*
* Returns : errno
*/
static int update_moved_ino ( struct gfs2_inode * ip , struct gfs2_inode * ndip ,
int dir_rename )
{
if ( dir_rename )
return gfs2_dir_mvino ( ip , & gfs2_qdotdot , ndip , DT_DIR ) ;
2016-09-14 17:48:04 +03:00
ip - > i_inode . i_ctime = current_time ( & ip - > i_inode ) ;
2018-02-28 22:48:53 +03:00
mark_inode_dirty_sync ( & ip - > i_inode ) ;
2015-05-05 20:12:19 +03:00
return 0 ;
}
2006-01-16 19:50:04 +03:00
/**
* gfs2_rename - Rename a file
* @ odir : Parent directory of old file name
* @ odentry : The old dentry of the file
* @ ndir : Parent directory of new file name
* @ ndentry : The new dentry of the file
*
* Returns : errno
*/
static int gfs2_rename ( struct inode * odir , struct dentry * odentry ,
struct inode * ndir , struct dentry * ndentry )
{
2006-06-14 23:32:57 +04:00
struct gfs2_inode * odip = GFS2_I ( odir ) ;
struct gfs2_inode * ndip = GFS2_I ( ndir ) ;
2015-03-18 01:25:59 +03:00
struct gfs2_inode * ip = GFS2_I ( d_inode ( odentry ) ) ;
2006-01-16 19:50:04 +03:00
struct gfs2_inode * nip = NULL ;
2006-06-14 23:32:57 +04:00
struct gfs2_sbd * sdp = GFS2_SB ( odir ) ;
2019-08-30 20:31:00 +03:00
struct gfs2_holder ghs [ 4 ] , r_gh , rd_gh ;
2007-01-30 02:13:44 +03:00
struct gfs2_rgrpd * nrgd ;
2006-01-16 19:50:04 +03:00
unsigned int num_gh ;
int dir_rename = 0 ;
2014-09-29 16:52:04 +04:00
struct gfs2_diradd da = { . nr_blocks = 0 , . save_loc = 0 , } ;
2006-01-16 19:50:04 +03:00
unsigned int x ;
int error ;
2016-06-17 15:31:27 +03:00
gfs2_holder_mark_uninitialized ( & r_gh ) ;
2019-08-30 20:31:00 +03:00
gfs2_holder_mark_uninitialized ( & rd_gh ) ;
2015-03-18 01:25:59 +03:00
if ( d_really_is_positive ( ndentry ) ) {
nip = GFS2_I ( d_inode ( ndentry ) ) ;
2006-01-16 19:50:04 +03:00
if ( ip = = nip )
return 0 ;
}
2012-04-05 06:11:16 +04:00
error = gfs2_rindex_update ( sdp ) ;
if ( error )
return error ;
2020-02-27 21:47:53 +03:00
error = gfs2_qa_get ( ndip ) ;
2012-06-06 14:17:59 +04:00
if ( error )
return error ;
2008-08-26 12:38:26 +04:00
if ( odip ! = ndip ) {
error = gfs2_glock_nq_init ( sdp - > sd_rename_gl , LM_ST_EXCLUSIVE ,
0 , & r_gh ) ;
2006-01-16 19:50:04 +03:00
if ( error )
goto out ;
2008-08-26 12:38:26 +04:00
if ( S_ISDIR ( ip - > i_inode . i_mode ) ) {
dir_rename = 1 ;
2015-05-05 20:12:19 +03:00
/* don't move a directory into its subdir */
2008-08-26 12:38:26 +04:00
error = gfs2_ok_to_move ( ip , ndip ) ;
if ( error )
goto out_gunlock_r ;
}
2006-01-16 19:50:04 +03:00
}
2006-06-21 23:38:17 +04:00
num_gh = 1 ;
gfs2: Use async glocks for rename
Because s_vfs_rename_mutex is not cluster-wide, multiple nodes can
reverse the roles of which directories are "old" and which are "new" for
the purposes of rename. This can cause deadlocks where two nodes end up
waiting for each other.
There can be several layers of directory dependencies across many nodes.
This patch fixes the problem by acquiring all gfs2_rename's inode glocks
asychronously and waiting for all glocks to be acquired. That way all
inodes are locked regardless of the order.
The timeout value for multiple asynchronous glocks is calculated to be
the total of the individual wait times for each glock times two.
Since gfs2_exchange is very similar to gfs2_rename, both functions are
patched in the same way.
A new async glock wait queue, sd_async_glock_wait, keeps a list of
waiters for these events. If gfs2's holder_wake function detects an
async holder, it wakes up any waiters for the event. The waiter only
tests whether any of its requests are still pending.
Since the glocks are sent to dlm asychronously, the wait function needs
to check to see which glocks, if any, were granted.
If a glock is granted by dlm (and therefore held), its minimum hold time
is checked and adjusted as necessary, as other glock grants do.
If the event times out, all glocks held thus far must be dequeued to
resolve any existing deadlocks. Then, if there are any outstanding
locking requests, we need to loop around and wait for dlm to respond to
those requests too. After we release all requests, we return -ESTALE to
the caller (vfs rename) which loops around and retries the request.
Node1 Node2
--------- ---------
1. Enqueue A Enqueue B
2. Enqueue B Enqueue A
3. A granted
6. B granted
7. Wait for B
8. Wait for A
9. A times out (since Node 1 holds A)
10. Dequeue B (since it was granted)
11. Wait for all requests from DLM
12. B Granted (since Node2 released it in step 10)
13. Rename
14. Dequeue A
15. DLM Grants A
16. Dequeue A (due to the timeout and since we
no longer have B held for our task).
17. Dequeue B
18. Return -ESTALE to vfs
19. VFS retries the operation, goto step 1.
This release-all-locks / acquire-all-locks may slow rename / exchange
down as both nodes struggle in the same way and do the same thing.
However, this will only happen when there is contention for the same
inodes, which ought to be rare.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
2019-08-30 20:31:02 +03:00
gfs2_holder_init ( odip - > i_gl , LM_ST_EXCLUSIVE , GL_ASYNC , ghs ) ;
2006-06-21 23:38:17 +04:00
if ( odip ! = ndip ) {
gfs2: Use async glocks for rename
Because s_vfs_rename_mutex is not cluster-wide, multiple nodes can
reverse the roles of which directories are "old" and which are "new" for
the purposes of rename. This can cause deadlocks where two nodes end up
waiting for each other.
There can be several layers of directory dependencies across many nodes.
This patch fixes the problem by acquiring all gfs2_rename's inode glocks
asychronously and waiting for all glocks to be acquired. That way all
inodes are locked regardless of the order.
The timeout value for multiple asynchronous glocks is calculated to be
the total of the individual wait times for each glock times two.
Since gfs2_exchange is very similar to gfs2_rename, both functions are
patched in the same way.
A new async glock wait queue, sd_async_glock_wait, keeps a list of
waiters for these events. If gfs2's holder_wake function detects an
async holder, it wakes up any waiters for the event. The waiter only
tests whether any of its requests are still pending.
Since the glocks are sent to dlm asychronously, the wait function needs
to check to see which glocks, if any, were granted.
If a glock is granted by dlm (and therefore held), its minimum hold time
is checked and adjusted as necessary, as other glock grants do.
If the event times out, all glocks held thus far must be dequeued to
resolve any existing deadlocks. Then, if there are any outstanding
locking requests, we need to loop around and wait for dlm to respond to
those requests too. After we release all requests, we return -ESTALE to
the caller (vfs rename) which loops around and retries the request.
Node1 Node2
--------- ---------
1. Enqueue A Enqueue B
2. Enqueue B Enqueue A
3. A granted
6. B granted
7. Wait for B
8. Wait for A
9. A times out (since Node 1 holds A)
10. Dequeue B (since it was granted)
11. Wait for all requests from DLM
12. B Granted (since Node2 released it in step 10)
13. Rename
14. Dequeue A
15. DLM Grants A
16. Dequeue A (due to the timeout and since we
no longer have B held for our task).
17. Dequeue B
18. Return -ESTALE to vfs
19. VFS retries the operation, goto step 1.
This release-all-locks / acquire-all-locks may slow rename / exchange
down as both nodes struggle in the same way and do the same thing.
However, this will only happen when there is contention for the same
inodes, which ought to be rare.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
2019-08-30 20:31:02 +03:00
gfs2_holder_init ( ndip - > i_gl , LM_ST_EXCLUSIVE , GL_ASYNC ,
ghs + num_gh ) ;
2006-06-21 23:38:17 +04:00
num_gh + + ;
}
gfs2: Use async glocks for rename
Because s_vfs_rename_mutex is not cluster-wide, multiple nodes can
reverse the roles of which directories are "old" and which are "new" for
the purposes of rename. This can cause deadlocks where two nodes end up
waiting for each other.
There can be several layers of directory dependencies across many nodes.
This patch fixes the problem by acquiring all gfs2_rename's inode glocks
asychronously and waiting for all glocks to be acquired. That way all
inodes are locked regardless of the order.
The timeout value for multiple asynchronous glocks is calculated to be
the total of the individual wait times for each glock times two.
Since gfs2_exchange is very similar to gfs2_rename, both functions are
patched in the same way.
A new async glock wait queue, sd_async_glock_wait, keeps a list of
waiters for these events. If gfs2's holder_wake function detects an
async holder, it wakes up any waiters for the event. The waiter only
tests whether any of its requests are still pending.
Since the glocks are sent to dlm asychronously, the wait function needs
to check to see which glocks, if any, were granted.
If a glock is granted by dlm (and therefore held), its minimum hold time
is checked and adjusted as necessary, as other glock grants do.
If the event times out, all glocks held thus far must be dequeued to
resolve any existing deadlocks. Then, if there are any outstanding
locking requests, we need to loop around and wait for dlm to respond to
those requests too. After we release all requests, we return -ESTALE to
the caller (vfs rename) which loops around and retries the request.
Node1 Node2
--------- ---------
1. Enqueue A Enqueue B
2. Enqueue B Enqueue A
3. A granted
6. B granted
7. Wait for B
8. Wait for A
9. A times out (since Node 1 holds A)
10. Dequeue B (since it was granted)
11. Wait for all requests from DLM
12. B Granted (since Node2 released it in step 10)
13. Rename
14. Dequeue A
15. DLM Grants A
16. Dequeue A (due to the timeout and since we
no longer have B held for our task).
17. Dequeue B
18. Return -ESTALE to vfs
19. VFS retries the operation, goto step 1.
This release-all-locks / acquire-all-locks may slow rename / exchange
down as both nodes struggle in the same way and do the same thing.
However, this will only happen when there is contention for the same
inodes, which ought to be rare.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
2019-08-30 20:31:02 +03:00
gfs2_holder_init ( ip - > i_gl , LM_ST_EXCLUSIVE , GL_ASYNC , ghs + num_gh ) ;
2006-06-21 23:38:17 +04:00
num_gh + + ;
2006-01-16 19:50:04 +03:00
2006-06-21 23:38:17 +04:00
if ( nip ) {
gfs2: Use async glocks for rename
Because s_vfs_rename_mutex is not cluster-wide, multiple nodes can
reverse the roles of which directories are "old" and which are "new" for
the purposes of rename. This can cause deadlocks where two nodes end up
waiting for each other.
There can be several layers of directory dependencies across many nodes.
This patch fixes the problem by acquiring all gfs2_rename's inode glocks
asychronously and waiting for all glocks to be acquired. That way all
inodes are locked regardless of the order.
The timeout value for multiple asynchronous glocks is calculated to be
the total of the individual wait times for each glock times two.
Since gfs2_exchange is very similar to gfs2_rename, both functions are
patched in the same way.
A new async glock wait queue, sd_async_glock_wait, keeps a list of
waiters for these events. If gfs2's holder_wake function detects an
async holder, it wakes up any waiters for the event. The waiter only
tests whether any of its requests are still pending.
Since the glocks are sent to dlm asychronously, the wait function needs
to check to see which glocks, if any, were granted.
If a glock is granted by dlm (and therefore held), its minimum hold time
is checked and adjusted as necessary, as other glock grants do.
If the event times out, all glocks held thus far must be dequeued to
resolve any existing deadlocks. Then, if there are any outstanding
locking requests, we need to loop around and wait for dlm to respond to
those requests too. After we release all requests, we return -ESTALE to
the caller (vfs rename) which loops around and retries the request.
Node1 Node2
--------- ---------
1. Enqueue A Enqueue B
2. Enqueue B Enqueue A
3. A granted
6. B granted
7. Wait for B
8. Wait for A
9. A times out (since Node 1 holds A)
10. Dequeue B (since it was granted)
11. Wait for all requests from DLM
12. B Granted (since Node2 released it in step 10)
13. Rename
14. Dequeue A
15. DLM Grants A
16. Dequeue A (due to the timeout and since we
no longer have B held for our task).
17. Dequeue B
18. Return -ESTALE to vfs
19. VFS retries the operation, goto step 1.
This release-all-locks / acquire-all-locks may slow rename / exchange
down as both nodes struggle in the same way and do the same thing.
However, this will only happen when there is contention for the same
inodes, which ought to be rare.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
2019-08-30 20:31:02 +03:00
gfs2_holder_init ( nip - > i_gl , LM_ST_EXCLUSIVE , GL_ASYNC ,
ghs + num_gh ) ;
2006-06-21 23:38:17 +04:00
num_gh + + ;
}
2006-01-16 19:50:04 +03:00
2008-08-12 22:39:29 +04:00
for ( x = 0 ; x < num_gh ; x + + ) {
error = gfs2_glock_nq ( ghs + x ) ;
if ( error )
goto out_gunlock ;
}
gfs2: Use async glocks for rename
Because s_vfs_rename_mutex is not cluster-wide, multiple nodes can
reverse the roles of which directories are "old" and which are "new" for
the purposes of rename. This can cause deadlocks where two nodes end up
waiting for each other.
There can be several layers of directory dependencies across many nodes.
This patch fixes the problem by acquiring all gfs2_rename's inode glocks
asychronously and waiting for all glocks to be acquired. That way all
inodes are locked regardless of the order.
The timeout value for multiple asynchronous glocks is calculated to be
the total of the individual wait times for each glock times two.
Since gfs2_exchange is very similar to gfs2_rename, both functions are
patched in the same way.
A new async glock wait queue, sd_async_glock_wait, keeps a list of
waiters for these events. If gfs2's holder_wake function detects an
async holder, it wakes up any waiters for the event. The waiter only
tests whether any of its requests are still pending.
Since the glocks are sent to dlm asychronously, the wait function needs
to check to see which glocks, if any, were granted.
If a glock is granted by dlm (and therefore held), its minimum hold time
is checked and adjusted as necessary, as other glock grants do.
If the event times out, all glocks held thus far must be dequeued to
resolve any existing deadlocks. Then, if there are any outstanding
locking requests, we need to loop around and wait for dlm to respond to
those requests too. After we release all requests, we return -ESTALE to
the caller (vfs rename) which loops around and retries the request.
Node1 Node2
--------- ---------
1. Enqueue A Enqueue B
2. Enqueue B Enqueue A
3. A granted
6. B granted
7. Wait for B
8. Wait for A
9. A times out (since Node 1 holds A)
10. Dequeue B (since it was granted)
11. Wait for all requests from DLM
12. B Granted (since Node2 released it in step 10)
13. Rename
14. Dequeue A
15. DLM Grants A
16. Dequeue A (due to the timeout and since we
no longer have B held for our task).
17. Dequeue B
18. Return -ESTALE to vfs
19. VFS retries the operation, goto step 1.
This release-all-locks / acquire-all-locks may slow rename / exchange
down as both nodes struggle in the same way and do the same thing.
However, this will only happen when there is contention for the same
inodes, which ought to be rare.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
2019-08-30 20:31:02 +03:00
error = gfs2_glock_async_wait ( num_gh , ghs ) ;
if ( error )
goto out_gunlock ;
2006-01-16 19:50:04 +03:00
2019-08-30 20:31:00 +03:00
if ( nip ) {
/* Grab the resource group glock for unlink flag twiddling.
* This is the case where the target dinode already exists
* so we unlink before doing the rename .
*/
nrgd = gfs2_blk2rgrpd ( sdp , nip - > i_no_addr , 1 ) ;
if ( ! nrgd ) {
error = - ENOENT ;
goto out_gunlock ;
}
2018-04-24 20:35:02 +03:00
error = gfs2_glock_nq_init ( nrgd - > rd_gl , LM_ST_EXCLUSIVE ,
LM_FLAG_NODE_SCOPE , & rd_gh ) ;
2019-08-30 20:31:00 +03:00
if ( error )
goto out_gunlock ;
}
2011-05-05 15:35:40 +04:00
error = - ENOENT ;
if ( ip - > i_inode . i_nlink = = 0 )
goto out_gunlock ;
2006-01-16 19:50:04 +03:00
/* Check out the old directory */
error = gfs2_unlink_ok ( odip , & odentry - > d_name , ip ) ;
if ( error )
goto out_gunlock ;
/* Check out the new directory */
if ( nip ) {
error = gfs2_unlink_ok ( ndip , & ndentry - > d_name , nip ) ;
if ( error )
goto out_gunlock ;
2011-05-05 15:35:40 +04:00
if ( nip - > i_inode . i_nlink = = 0 ) {
error = - EAGAIN ;
goto out_gunlock ;
}
2006-11-01 20:22:46 +03:00
if ( S_ISDIR ( nip - > i_inode . i_mode ) ) {
2008-11-03 16:59:19 +03:00
if ( nip - > i_entries < 2 ) {
2011-05-09 16:36:10 +04:00
gfs2_consist_inode ( nip ) ;
2006-01-16 19:50:04 +03:00
error = - EIO ;
goto out_gunlock ;
}
2008-11-03 16:59:19 +03:00
if ( nip - > i_entries > 2 ) {
2006-01-16 19:50:04 +03:00
error = - ENOTEMPTY ;
goto out_gunlock ;
}
}
} else {
2021-01-21 16:19:43 +03:00
error = gfs2_permission ( & init_user_ns , ndir ,
MAY_WRITE | MAY_EXEC ) ;
2006-01-16 19:50:04 +03:00
if ( error )
goto out_gunlock ;
2007-05-15 18:37:50 +04:00
error = gfs2_dir_check ( ndir , & ndentry - > d_name , NULL ) ;
2006-01-16 19:50:04 +03:00
switch ( error ) {
case - ENOENT :
error = 0 ;
break ;
case 0 :
error = - EEXIST ;
2020-11-20 21:25:03 +03:00
goto out_gunlock ;
2006-01-16 19:50:04 +03:00
default :
goto out_gunlock ;
2019-10-04 18:55:29 +03:00
}
2006-01-16 19:50:04 +03:00
if ( odip ! = ndip ) {
2006-11-01 22:04:17 +03:00
if ( ! ndip - > i_inode . i_nlink ) {
2011-05-05 15:35:40 +04:00
error = - ENOENT ;
2006-01-16 19:50:04 +03:00
goto out_gunlock ;
}
2008-11-03 16:59:19 +03:00
if ( ndip - > i_entries = = ( u32 ) - 1 ) {
2006-01-16 19:50:04 +03:00
error = - EFBIG ;
goto out_gunlock ;
}
2006-11-01 20:22:46 +03:00
if ( S_ISDIR ( ip - > i_inode . i_mode ) & &
2006-11-01 22:04:17 +03:00
ndip - > i_inode . i_nlink = = ( u32 ) - 1 ) {
2006-01-16 19:50:04 +03:00
error = - EMLINK ;
goto out_gunlock ;
}
}
}
/* Check out the dir to be renamed */
if ( dir_rename ) {
2021-01-21 16:19:43 +03:00
error = gfs2_permission ( & init_user_ns , d_inode ( odentry ) ,
MAY_WRITE ) ;
2006-01-16 19:50:04 +03:00
if ( error )
goto out_gunlock ;
}
2014-01-06 15:28:41 +04:00
if ( nip = = NULL ) {
error = gfs2_diradd_alloc_required ( ndir , & ndentry - > d_name , & da ) ;
if ( error )
goto out_gunlock ;
}
2006-01-16 19:50:04 +03:00
2014-01-06 15:28:41 +04:00
if ( da . nr_blocks ) {
struct gfs2_alloc_parms ap = { . target = da . nr_blocks , } ;
2015-03-18 20:03:41 +03:00
error = gfs2_quota_lock_check ( ndip , & ap ) ;
2006-01-16 19:50:04 +03:00
if ( error )
2012-05-18 17:28:23 +04:00
goto out_gunlock ;
2006-01-16 19:50:04 +03:00
2013-10-02 14:13:25 +04:00
error = gfs2_inplace_reserve ( ndip , & ap ) ;
2006-01-16 19:50:04 +03:00
if ( error )
goto out_gunlock_q ;
2014-01-06 16:03:05 +04:00
error = gfs2_trans_begin ( sdp , gfs2_trans_da_blks ( ndip , & da , 4 ) +
4 * RES_LEAF + 4 , 0 ) ;
2006-01-16 19:50:04 +03:00
if ( error )
goto out_ipreserv ;
} else {
error = gfs2_trans_begin ( sdp , 4 * RES_DINODE +
2007-01-19 00:07:03 +03:00
5 * RES_LEAF + 4 , 0 ) ;
2006-01-16 19:50:04 +03:00
if ( error )
goto out_gunlock ;
}
/* Remove the target file, if it exists */
2012-11-12 22:03:29 +04:00
if ( nip )
error = gfs2_unlink_inode ( ndip , ndentry ) ;
2006-01-16 19:50:04 +03:00
2015-05-05 20:12:19 +03:00
error = update_moved_ino ( ip , ndip , dir_rename ) ;
if ( error )
goto out_end_trans ;
2006-01-16 19:50:04 +03:00
2011-05-09 19:42:37 +04:00
error = gfs2_dir_del ( odip , odentry ) ;
2006-01-16 19:50:04 +03:00
if ( error )
goto out_end_trans ;
2014-01-06 16:49:43 +04:00
error = gfs2_dir_add ( ndir , & ndentry - > d_name , ip , & da ) ;
2006-01-16 19:50:04 +03:00
if ( error )
goto out_end_trans ;
2006-06-14 23:32:57 +04:00
out_end_trans :
2006-01-16 19:50:04 +03:00
gfs2_trans_end ( sdp ) ;
2006-06-14 23:32:57 +04:00
out_ipreserv :
2014-01-06 15:28:41 +04:00
if ( da . nr_blocks )
2006-01-16 19:50:04 +03:00
gfs2_inplace_release ( ndip ) ;
2006-06-14 23:32:57 +04:00
out_gunlock_q :
2014-01-06 15:28:41 +04:00
if ( da . nr_blocks )
2006-01-16 19:50:04 +03:00
gfs2_quota_unlock ( ndip ) ;
2006-06-14 23:32:57 +04:00
out_gunlock :
2014-01-06 16:49:43 +04:00
gfs2_dir_no_add ( & da ) ;
2019-08-30 20:31:00 +03:00
if ( gfs2_holder_initialized ( & rd_gh ) )
gfs2_glock_dq_uninit ( & rd_gh ) ;
2008-08-12 22:39:29 +04:00
while ( x - - ) {
gfs2: Use async glocks for rename
Because s_vfs_rename_mutex is not cluster-wide, multiple nodes can
reverse the roles of which directories are "old" and which are "new" for
the purposes of rename. This can cause deadlocks where two nodes end up
waiting for each other.
There can be several layers of directory dependencies across many nodes.
This patch fixes the problem by acquiring all gfs2_rename's inode glocks
asychronously and waiting for all glocks to be acquired. That way all
inodes are locked regardless of the order.
The timeout value for multiple asynchronous glocks is calculated to be
the total of the individual wait times for each glock times two.
Since gfs2_exchange is very similar to gfs2_rename, both functions are
patched in the same way.
A new async glock wait queue, sd_async_glock_wait, keeps a list of
waiters for these events. If gfs2's holder_wake function detects an
async holder, it wakes up any waiters for the event. The waiter only
tests whether any of its requests are still pending.
Since the glocks are sent to dlm asychronously, the wait function needs
to check to see which glocks, if any, were granted.
If a glock is granted by dlm (and therefore held), its minimum hold time
is checked and adjusted as necessary, as other glock grants do.
If the event times out, all glocks held thus far must be dequeued to
resolve any existing deadlocks. Then, if there are any outstanding
locking requests, we need to loop around and wait for dlm to respond to
those requests too. After we release all requests, we return -ESTALE to
the caller (vfs rename) which loops around and retries the request.
Node1 Node2
--------- ---------
1. Enqueue A Enqueue B
2. Enqueue B Enqueue A
3. A granted
6. B granted
7. Wait for B
8. Wait for A
9. A times out (since Node 1 holds A)
10. Dequeue B (since it was granted)
11. Wait for all requests from DLM
12. B Granted (since Node2 released it in step 10)
13. Rename
14. Dequeue A
15. DLM Grants A
16. Dequeue A (due to the timeout and since we
no longer have B held for our task).
17. Dequeue B
18. Return -ESTALE to vfs
19. VFS retries the operation, goto step 1.
This release-all-locks / acquire-all-locks may slow rename / exchange
down as both nodes struggle in the same way and do the same thing.
However, this will only happen when there is contention for the same
inodes, which ought to be rare.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
2019-08-30 20:31:02 +03:00
if ( gfs2_holder_queued ( ghs + x ) )
gfs2_glock_dq ( ghs + x ) ;
2006-01-16 19:50:04 +03:00
gfs2_holder_uninit ( ghs + x ) ;
2008-08-12 22:39:29 +04:00
}
2006-06-14 23:32:57 +04:00
out_gunlock_r :
2016-06-17 15:31:27 +03:00
if ( gfs2_holder_initialized ( & r_gh ) )
2006-01-16 19:50:04 +03:00
gfs2_glock_dq_uninit ( & r_gh ) ;
2006-06-14 23:32:57 +04:00
out :
2020-02-27 21:47:53 +03:00
gfs2_qa_put ( ndip ) ;
2006-01-16 19:50:04 +03:00
return error ;
}
2015-05-05 20:12:19 +03:00
/**
* gfs2_exchange - exchange two files
* @ odir : Parent directory of old file name
* @ odentry : The old dentry of the file
* @ ndir : Parent directory of new file name
* @ ndentry : The new dentry of the file
* @ flags : The rename flags
*
* Returns : errno
*/
static int gfs2_exchange ( struct inode * odir , struct dentry * odentry ,
struct inode * ndir , struct dentry * ndentry ,
unsigned int flags )
{
struct gfs2_inode * odip = GFS2_I ( odir ) ;
struct gfs2_inode * ndip = GFS2_I ( ndir ) ;
struct gfs2_inode * oip = GFS2_I ( odentry - > d_inode ) ;
struct gfs2_inode * nip = GFS2_I ( ndentry - > d_inode ) ;
struct gfs2_sbd * sdp = GFS2_SB ( odir ) ;
gfs2: Use async glocks for rename
Because s_vfs_rename_mutex is not cluster-wide, multiple nodes can
reverse the roles of which directories are "old" and which are "new" for
the purposes of rename. This can cause deadlocks where two nodes end up
waiting for each other.
There can be several layers of directory dependencies across many nodes.
This patch fixes the problem by acquiring all gfs2_rename's inode glocks
asychronously and waiting for all glocks to be acquired. That way all
inodes are locked regardless of the order.
The timeout value for multiple asynchronous glocks is calculated to be
the total of the individual wait times for each glock times two.
Since gfs2_exchange is very similar to gfs2_rename, both functions are
patched in the same way.
A new async glock wait queue, sd_async_glock_wait, keeps a list of
waiters for these events. If gfs2's holder_wake function detects an
async holder, it wakes up any waiters for the event. The waiter only
tests whether any of its requests are still pending.
Since the glocks are sent to dlm asychronously, the wait function needs
to check to see which glocks, if any, were granted.
If a glock is granted by dlm (and therefore held), its minimum hold time
is checked and adjusted as necessary, as other glock grants do.
If the event times out, all glocks held thus far must be dequeued to
resolve any existing deadlocks. Then, if there are any outstanding
locking requests, we need to loop around and wait for dlm to respond to
those requests too. After we release all requests, we return -ESTALE to
the caller (vfs rename) which loops around and retries the request.
Node1 Node2
--------- ---------
1. Enqueue A Enqueue B
2. Enqueue B Enqueue A
3. A granted
6. B granted
7. Wait for B
8. Wait for A
9. A times out (since Node 1 holds A)
10. Dequeue B (since it was granted)
11. Wait for all requests from DLM
12. B Granted (since Node2 released it in step 10)
13. Rename
14. Dequeue A
15. DLM Grants A
16. Dequeue A (due to the timeout and since we
no longer have B held for our task).
17. Dequeue B
18. Return -ESTALE to vfs
19. VFS retries the operation, goto step 1.
This release-all-locks / acquire-all-locks may slow rename / exchange
down as both nodes struggle in the same way and do the same thing.
However, this will only happen when there is contention for the same
inodes, which ought to be rare.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
2019-08-30 20:31:02 +03:00
struct gfs2_holder ghs [ 4 ] , r_gh ;
2015-05-05 20:12:19 +03:00
unsigned int num_gh ;
unsigned int x ;
umode_t old_mode = oip - > i_inode . i_mode ;
umode_t new_mode = nip - > i_inode . i_mode ;
int error ;
2016-06-17 15:31:27 +03:00
gfs2_holder_mark_uninitialized ( & r_gh ) ;
2015-05-05 20:12:19 +03:00
error = gfs2_rindex_update ( sdp ) ;
if ( error )
return error ;
if ( odip ! = ndip ) {
error = gfs2_glock_nq_init ( sdp - > sd_rename_gl , LM_ST_EXCLUSIVE ,
0 , & r_gh ) ;
if ( error )
goto out ;
if ( S_ISDIR ( old_mode ) ) {
/* don't move a directory into its subdir */
error = gfs2_ok_to_move ( oip , ndip ) ;
if ( error )
goto out_gunlock_r ;
}
if ( S_ISDIR ( new_mode ) ) {
/* don't move a directory into its subdir */
error = gfs2_ok_to_move ( nip , odip ) ;
if ( error )
goto out_gunlock_r ;
}
}
num_gh = 1 ;
gfs2: Use async glocks for rename
Because s_vfs_rename_mutex is not cluster-wide, multiple nodes can
reverse the roles of which directories are "old" and which are "new" for
the purposes of rename. This can cause deadlocks where two nodes end up
waiting for each other.
There can be several layers of directory dependencies across many nodes.
This patch fixes the problem by acquiring all gfs2_rename's inode glocks
asynchronously and waiting for all glocks to be acquired. That way all
inodes are locked regardless of the order.
The timeout value for multiple asynchronous glocks is calculated to be
the total of the individual wait times for each glock times two.
Since gfs2_exchange is very similar to gfs2_rename, both functions are
patched in the same way.
A new async glock wait queue, sd_async_glock_wait, keeps a list of
waiters for these events. If gfs2's holder_wake function detects an
async holder, it wakes up any waiters for the event. The waiter only
tests whether any of its requests are still pending.
Since the glocks are sent to dlm asynchronously, the wait function needs
to check to see which glocks, if any, were granted.
If a glock is granted by dlm (and therefore held), its minimum hold time
is checked and adjusted as necessary, as other glock grants do.
If the event times out, all glocks held thus far must be dequeued to
resolve any existing deadlocks. Then, if there are any outstanding
locking requests, we need to loop around and wait for dlm to respond to
those requests too. After we release all requests, we return -ESTALE to
the caller (vfs rename) which loops around and retries the request.
Node1 Node2
--------- ---------
1. Enqueue A Enqueue B
2. Enqueue B Enqueue A
3. A granted
6. B granted
7. Wait for B
8. Wait for A
9. A times out (since Node 1 holds A)
10. Dequeue B (since it was granted)
11. Wait for all requests from DLM
12. B Granted (since Node2 released it in step 10)
13. Rename
14. Dequeue A
15. DLM Grants A
16. Dequeue A (due to the timeout and since we
no longer have B held for our task).
17. Dequeue B
18. Return -ESTALE to vfs
19. VFS retries the operation, goto step 1.
This release-all-locks / acquire-all-locks may slow rename / exchange
down as both nodes struggle in the same way and do the same thing.
However, this will only happen when there is contention for the same
inodes, which ought to be rare.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
2019-08-30 20:31:02 +03:00
gfs2_holder_init ( odip - > i_gl , LM_ST_EXCLUSIVE , GL_ASYNC , ghs ) ;
2015-05-05 20:12:19 +03:00
if ( odip ! = ndip ) {
gfs2: Use async glocks for rename
Because s_vfs_rename_mutex is not cluster-wide, multiple nodes can
reverse the roles of which directories are "old" and which are "new" for
the purposes of rename. This can cause deadlocks where two nodes end up
waiting for each other.
There can be several layers of directory dependencies across many nodes.
This patch fixes the problem by acquiring all gfs2_rename's inode glocks
asychronously and waiting for all glocks to be acquired. That way all
inodes are locked regardless of the order.
The timeout value for multiple asynchronous glocks is calculated to be
the total of the individual wait times for each glock times two.
Since gfs2_exchange is very similar to gfs2_rename, both functions are
patched in the same way.
A new async glock wait queue, sd_async_glock_wait, keeps a list of
waiters for these events. If gfs2's holder_wake function detects an
async holder, it wakes up any waiters for the event. The waiter only
tests whether any of its requests are still pending.
Since the glocks are sent to dlm asychronously, the wait function needs
to check to see which glocks, if any, were granted.
If a glock is granted by dlm (and therefore held), its minimum hold time
is checked and adjusted as necessary, as other glock grants do.
If the event times out, all glocks held thus far must be dequeued to
resolve any existing deadlocks. Then, if there are any outstanding
locking requests, we need to loop around and wait for dlm to respond to
those requests too. After we release all requests, we return -ESTALE to
the caller (vfs rename) which loops around and retries the request.
Node1 Node2
--------- ---------
1. Enqueue A Enqueue B
2. Enqueue B Enqueue A
3. A granted
6. B granted
7. Wait for B
8. Wait for A
9. A times out (since Node 1 holds A)
10. Dequeue B (since it was granted)
11. Wait for all requests from DLM
12. B Granted (since Node2 released it in step 10)
13. Rename
14. Dequeue A
15. DLM Grants A
16. Dequeue A (due to the timeout and since we
no longer have B held for our task).
17. Dequeue B
18. Return -ESTALE to vfs
19. VFS retries the operation, goto step 1.
This release-all-locks / acquire-all-locks may slow rename / exchange
down as both nodes struggle in the same way and do the same thing.
However, this will only happen when there is contention for the same
inodes, which ought to be rare.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
2019-08-30 20:31:02 +03:00
gfs2_holder_init ( ndip - > i_gl , LM_ST_EXCLUSIVE , GL_ASYNC ,
ghs + num_gh ) ;
2015-05-05 20:12:19 +03:00
num_gh + + ;
}
gfs2: Use async glocks for rename
Because s_vfs_rename_mutex is not cluster-wide, multiple nodes can
reverse the roles of which directories are "old" and which are "new" for
the purposes of rename. This can cause deadlocks where two nodes end up
waiting for each other.
There can be several layers of directory dependencies across many nodes.
This patch fixes the problem by acquiring all gfs2_rename's inode glocks
asychronously and waiting for all glocks to be acquired. That way all
inodes are locked regardless of the order.
The timeout value for multiple asynchronous glocks is calculated to be
the total of the individual wait times for each glock times two.
Since gfs2_exchange is very similar to gfs2_rename, both functions are
patched in the same way.
A new async glock wait queue, sd_async_glock_wait, keeps a list of
waiters for these events. If gfs2's holder_wake function detects an
async holder, it wakes up any waiters for the event. The waiter only
tests whether any of its requests are still pending.
Since the glocks are sent to dlm asychronously, the wait function needs
to check to see which glocks, if any, were granted.
If a glock is granted by dlm (and therefore held), its minimum hold time
is checked and adjusted as necessary, as other glock grants do.
If the event times out, all glocks held thus far must be dequeued to
resolve any existing deadlocks. Then, if there are any outstanding
locking requests, we need to loop around and wait for dlm to respond to
those requests too. After we release all requests, we return -ESTALE to
the caller (vfs rename) which loops around and retries the request.
Node1 Node2
--------- ---------
1. Enqueue A Enqueue B
2. Enqueue B Enqueue A
3. A granted
6. B granted
7. Wait for B
8. Wait for A
9. A times out (since Node 1 holds A)
10. Dequeue B (since it was granted)
11. Wait for all requests from DLM
12. B Granted (since Node2 released it in step 10)
13. Rename
14. Dequeue A
15. DLM Grants A
16. Dequeue A (due to the timeout and since we
no longer have B held for our task).
17. Dequeue B
18. Return -ESTALE to vfs
19. VFS retries the operation, goto step 1.
This release-all-locks / acquire-all-locks may slow rename / exchange
down as both nodes struggle in the same way and do the same thing.
However, this will only happen when there is contention for the same
inodes, which ought to be rare.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
2019-08-30 20:31:02 +03:00
gfs2_holder_init ( oip - > i_gl , LM_ST_EXCLUSIVE , GL_ASYNC , ghs + num_gh ) ;
2015-05-05 20:12:19 +03:00
num_gh + + ;
gfs2: Use async glocks for rename
Because s_vfs_rename_mutex is not cluster-wide, multiple nodes can
reverse the roles of which directories are "old" and which are "new" for
the purposes of rename. This can cause deadlocks where two nodes end up
waiting for each other.
There can be several layers of directory dependencies across many nodes.
This patch fixes the problem by acquiring all gfs2_rename's inode glocks
asychronously and waiting for all glocks to be acquired. That way all
inodes are locked regardless of the order.
The timeout value for multiple asynchronous glocks is calculated to be
the total of the individual wait times for each glock times two.
Since gfs2_exchange is very similar to gfs2_rename, both functions are
patched in the same way.
A new async glock wait queue, sd_async_glock_wait, keeps a list of
waiters for these events. If gfs2's holder_wake function detects an
async holder, it wakes up any waiters for the event. The waiter only
tests whether any of its requests are still pending.
Since the glocks are sent to dlm asychronously, the wait function needs
to check to see which glocks, if any, were granted.
If a glock is granted by dlm (and therefore held), its minimum hold time
is checked and adjusted as necessary, as other glock grants do.
If the event times out, all glocks held thus far must be dequeued to
resolve any existing deadlocks. Then, if there are any outstanding
locking requests, we need to loop around and wait for dlm to respond to
those requests too. After we release all requests, we return -ESTALE to
the caller (vfs rename) which loops around and retries the request.
Node1 Node2
--------- ---------
1. Enqueue A Enqueue B
2. Enqueue B Enqueue A
3. A granted
6. B granted
7. Wait for B
8. Wait for A
9. A times out (since Node 1 holds A)
10. Dequeue B (since it was granted)
11. Wait for all requests from DLM
12. B Granted (since Node2 released it in step 10)
13. Rename
14. Dequeue A
15. DLM Grants A
16. Dequeue A (due to the timeout and since we
no longer have B held for our task).
17. Dequeue B
18. Return -ESTALE to vfs
19. VFS retries the operation, goto step 1.
This release-all-locks / acquire-all-locks may slow rename / exchange
down as both nodes struggle in the same way and do the same thing.
However, this will only happen when there is contention for the same
inodes, which ought to be rare.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
2019-08-30 20:31:02 +03:00
gfs2_holder_init ( nip - > i_gl , LM_ST_EXCLUSIVE , GL_ASYNC , ghs + num_gh ) ;
2015-05-05 20:12:19 +03:00
num_gh + + ;
for ( x = 0 ; x < num_gh ; x + + ) {
error = gfs2_glock_nq ( ghs + x ) ;
if ( error )
goto out_gunlock ;
}
gfs2: Use async glocks for rename
Because s_vfs_rename_mutex is not cluster-wide, multiple nodes can
reverse the roles of which directories are "old" and which are "new" for
the purposes of rename. This can cause deadlocks where two nodes end up
waiting for each other.
There can be several layers of directory dependencies across many nodes.
This patch fixes the problem by acquiring all gfs2_rename's inode glocks
asychronously and waiting for all glocks to be acquired. That way all
inodes are locked regardless of the order.
The timeout value for multiple asynchronous glocks is calculated to be
the total of the individual wait times for each glock times two.
Since gfs2_exchange is very similar to gfs2_rename, both functions are
patched in the same way.
A new async glock wait queue, sd_async_glock_wait, keeps a list of
waiters for these events. If gfs2's holder_wake function detects an
async holder, it wakes up any waiters for the event. The waiter only
tests whether any of its requests are still pending.
Since the glocks are sent to dlm asychronously, the wait function needs
to check to see which glocks, if any, were granted.
If a glock is granted by dlm (and therefore held), its minimum hold time
is checked and adjusted as necessary, as other glock grants do.
If the event times out, all glocks held thus far must be dequeued to
resolve any existing deadlocks. Then, if there are any outstanding
locking requests, we need to loop around and wait for dlm to respond to
those requests too. After we release all requests, we return -ESTALE to
the caller (vfs rename) which loops around and retries the request.
Node1 Node2
--------- ---------
1. Enqueue A Enqueue B
2. Enqueue B Enqueue A
3. A granted
6. B granted
7. Wait for B
8. Wait for A
9. A times out (since Node 1 holds A)
10. Dequeue B (since it was granted)
11. Wait for all requests from DLM
12. B Granted (since Node2 released it in step 10)
13. Rename
14. Dequeue A
15. DLM Grants A
16. Dequeue A (due to the timeout and since we
no longer have B held for our task).
17. Dequeue B
18. Return -ESTALE to vfs
19. VFS retries the operation, goto step 1.
This release-all-locks / acquire-all-locks may slow rename / exchange
down as both nodes struggle in the same way and do the same thing.
However, this will only happen when there is contention for the same
inodes, which ought to be rare.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
2019-08-30 20:31:02 +03:00
error = gfs2_glock_async_wait ( num_gh , ghs ) ;
if ( error )
goto out_gunlock ;
2015-05-05 20:12:19 +03:00
error = - ENOENT ;
if ( oip - > i_inode . i_nlink = = 0 | | nip - > i_inode . i_nlink = = 0 )
goto out_gunlock ;
error = gfs2_unlink_ok ( odip , & odentry - > d_name , oip ) ;
if ( error )
goto out_gunlock ;
error = gfs2_unlink_ok ( ndip , & ndentry - > d_name , nip ) ;
if ( error )
goto out_gunlock ;
if ( S_ISDIR ( old_mode ) ) {
2021-01-21 16:19:43 +03:00
error = gfs2_permission ( & init_user_ns , odentry - > d_inode ,
MAY_WRITE ) ;
2015-05-05 20:12:19 +03:00
if ( error )
goto out_gunlock ;
}
if ( S_ISDIR ( new_mode ) ) {
2021-01-21 16:19:43 +03:00
error = gfs2_permission ( & init_user_ns , ndentry - > d_inode ,
MAY_WRITE ) ;
2015-05-05 20:12:19 +03:00
if ( error )
goto out_gunlock ;
}
error = gfs2_trans_begin ( sdp , 4 * RES_DINODE + 4 * RES_LEAF , 0 ) ;
if ( error )
goto out_gunlock ;
error = update_moved_ino ( oip , ndip , S_ISDIR ( old_mode ) ) ;
if ( error )
goto out_end_trans ;
error = update_moved_ino ( nip , odip , S_ISDIR ( new_mode ) ) ;
if ( error )
goto out_end_trans ;
error = gfs2_dir_mvino ( ndip , & ndentry - > d_name , oip ,
IF2DT ( old_mode ) ) ;
if ( error )
goto out_end_trans ;
error = gfs2_dir_mvino ( odip , & odentry - > d_name , nip ,
IF2DT ( new_mode ) ) ;
if ( error )
goto out_end_trans ;
if ( odip ! = ndip ) {
if ( S_ISDIR ( new_mode ) & & ! S_ISDIR ( old_mode ) ) {
inc_nlink ( & odip - > i_inode ) ;
drop_nlink ( & ndip - > i_inode ) ;
} else if ( S_ISDIR ( old_mode ) & & ! S_ISDIR ( new_mode ) ) {
inc_nlink ( & ndip - > i_inode ) ;
drop_nlink ( & odip - > i_inode ) ;
}
}
mark_inode_dirty ( & ndip - > i_inode ) ;
if ( odip ! = ndip )
mark_inode_dirty ( & odip - > i_inode ) ;
out_end_trans :
gfs2_trans_end ( sdp ) ;
out_gunlock :
while ( x - - ) {
gfs2: Use async glocks for rename
Because s_vfs_rename_mutex is not cluster-wide, multiple nodes can
reverse the roles of which directories are "old" and which are "new" for
the purposes of rename. This can cause deadlocks where two nodes end up
waiting for each other.
There can be several layers of directory dependencies across many nodes.
This patch fixes the problem by acquiring all gfs2_rename's inode glocks
asychronously and waiting for all glocks to be acquired. That way all
inodes are locked regardless of the order.
The timeout value for multiple asynchronous glocks is calculated to be
the total of the individual wait times for each glock times two.
Since gfs2_exchange is very similar to gfs2_rename, both functions are
patched in the same way.
A new async glock wait queue, sd_async_glock_wait, keeps a list of
waiters for these events. If gfs2's holder_wake function detects an
async holder, it wakes up any waiters for the event. The waiter only
tests whether any of its requests are still pending.
Since the glocks are sent to dlm asychronously, the wait function needs
to check to see which glocks, if any, were granted.
If a glock is granted by dlm (and therefore held), its minimum hold time
is checked and adjusted as necessary, as other glock grants do.
If the event times out, all glocks held thus far must be dequeued to
resolve any existing deadlocks. Then, if there are any outstanding
locking requests, we need to loop around and wait for dlm to respond to
those requests too. After we release all requests, we return -ESTALE to
the caller (vfs rename) which loops around and retries the request.
Node1 Node2
--------- ---------
1. Enqueue A Enqueue B
2. Enqueue B Enqueue A
3. A granted
6. B granted
7. Wait for B
8. Wait for A
9. A times out (since Node 1 holds A)
10. Dequeue B (since it was granted)
11. Wait for all requests from DLM
12. B Granted (since Node2 released it in step 10)
13. Rename
14. Dequeue A
15. DLM Grants A
16. Dequeue A (due to the timeout and since we
no longer have B held for our task).
17. Dequeue B
18. Return -ESTALE to vfs
19. VFS retries the operation, goto step 1.
This release-all-locks / acquire-all-locks may slow rename / exchange
down as both nodes struggle in the same way and do the same thing.
However, this will only happen when there is contention for the same
inodes, which ought to be rare.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
2019-08-30 20:31:02 +03:00
if ( gfs2_holder_queued ( ghs + x ) )
gfs2_glock_dq ( ghs + x ) ;
2015-05-05 20:12:19 +03:00
gfs2_holder_uninit ( ghs + x ) ;
}
out_gunlock_r :
2016-06-17 15:31:27 +03:00
if ( gfs2_holder_initialized ( & r_gh ) )
2015-05-05 20:12:19 +03:00
gfs2_glock_dq_uninit ( & r_gh ) ;
out :
return error ;
}
2021-01-21 16:19:43 +03:00
static int gfs2_rename2 ( struct user_namespace * mnt_userns , struct inode * odir ,
struct dentry * odentry , struct inode * ndir ,
struct dentry * ndentry , unsigned int flags )
2015-05-05 20:12:19 +03:00
{
flags & = ~ RENAME_NOREPLACE ;
if ( flags & ~ RENAME_EXCHANGE )
return - EINVAL ;
if ( flags & RENAME_EXCHANGE )
return gfs2_exchange ( odir , odentry , ndir , ndentry , flags ) ;
return gfs2_rename ( odir , odentry , ndir , ndentry ) ;
}
2009-05-22 13:48:59 +04:00
/**
2015-11-17 18:20:54 +03:00
* gfs2_get_link - Follow a symbolic link
2010-01-14 08:59:16 +03:00
* @ dentry : The dentry of the link
2015-11-17 18:20:54 +03:00
* @ inode : The inode of the link
2015-12-29 23:58:39 +03:00
* @ done : destructor for return value
2009-05-22 13:48:59 +04:00
*
2010-01-14 08:59:16 +03:00
* This can handle symlinks of any size .
2009-05-22 13:48:59 +04:00
*
2010-01-14 08:59:16 +03:00
* Returns : 0 on success or error code
2009-05-22 13:48:59 +04:00
*/
2015-11-17 18:20:54 +03:00
static const char * gfs2_get_link ( struct dentry * dentry ,
2015-12-29 23:58:39 +03:00
struct inode * inode ,
struct delayed_call * done )
2009-05-22 13:48:59 +04:00
{
2015-11-17 18:20:54 +03:00
struct gfs2_inode * ip = GFS2_I ( inode ) ;
2009-05-22 13:48:59 +04:00
struct gfs2_holder i_gh ;
struct buffer_head * dibh ;
2011-05-13 13:34:59 +04:00
unsigned int size ;
2010-01-14 08:59:16 +03:00
char * buf ;
2009-05-22 13:48:59 +04:00
int error ;
2015-11-17 18:20:54 +03:00
if ( ! dentry )
return ERR_PTR ( - ECHILD ) ;
2009-05-22 13:48:59 +04:00
gfs2_holder_init ( ip - > i_gl , LM_ST_SHARED , 0 , & i_gh ) ;
error = gfs2_glock_nq ( & i_gh ) ;
if ( error ) {
gfs2_holder_uninit ( & i_gh ) ;
2015-05-02 20:32:22 +03:00
return ERR_PTR ( error ) ;
2009-05-22 13:48:59 +04:00
}
2010-08-11 12:53:11 +04:00
size = ( unsigned int ) i_size_read ( & ip - > i_inode ) ;
if ( size = = 0 ) {
2009-05-22 13:48:59 +04:00
gfs2_consist_inode ( ip ) ;
2010-01-14 08:59:16 +03:00
buf = ERR_PTR ( - EIO ) ;
2009-05-22 13:48:59 +04:00
goto out ;
}
error = gfs2_meta_inode_buffer ( ip , & dibh ) ;
2010-01-14 08:59:16 +03:00
if ( error ) {
buf = ERR_PTR ( error ) ;
2009-05-22 13:48:59 +04:00
goto out ;
}
2011-05-13 13:34:59 +04:00
buf = kzalloc ( size + 1 , GFP_NOFS ) ;
2010-01-14 08:59:16 +03:00
if ( ! buf )
buf = ERR_PTR ( - ENOMEM ) ;
else
2011-05-13 13:34:59 +04:00
memcpy ( buf , dibh - > b_data + sizeof ( struct gfs2_dinode ) , size ) ;
2009-05-22 13:48:59 +04:00
brelse ( dibh ) ;
out :
gfs2_glock_dq_uninit ( & i_gh ) ;
2015-05-02 20:32:22 +03:00
if ( ! IS_ERR ( buf ) )
2015-12-29 23:58:39 +03:00
set_delayed_call ( done , kfree_link , buf ) ;
2015-05-02 20:32:22 +03:00
return buf ;
2006-01-16 19:50:04 +03:00
}
/**
2021-03-30 19:44:29 +03:00
* gfs2_permission
* @ mnt_userns : User namespace of the mount the inode was found from
2011-01-19 12:42:40 +03:00
* @ inode : The inode
* @ mask : The mask to be tested
2006-01-16 19:50:04 +03:00
*
2006-11-27 17:55:28 +03:00
* This may be called from the VFS directly , or from within GFS2 with the
* inode locked , so we look to see if the glock is already locked and only
* lock the glock if its not already been done .
*
2006-01-16 19:50:04 +03:00
* Returns : errno
*/
2021-01-21 16:19:43 +03:00
int gfs2_permission ( struct user_namespace * mnt_userns , struct inode * inode ,
int mask )
2006-01-16 19:50:04 +03:00
{
2011-01-07 09:49:58 +03:00
struct gfs2_inode * ip ;
2006-01-16 19:50:04 +03:00
struct gfs2_holder i_gh ;
int error ;
2011-01-07 09:49:58 +03:00
2016-06-17 15:31:27 +03:00
gfs2_holder_mark_uninitialized ( & i_gh ) ;
2011-01-07 09:49:58 +03:00
ip = GFS2_I ( inode ) ;
2008-02-22 19:07:18 +03:00
if ( gfs2_glock_is_locked_by_me ( ip - > i_gl ) = = NULL ) {
2014-11-14 05:42:04 +03:00
if ( mask & MAY_NOT_BLOCK )
return - ECHILD ;
error = gfs2_glock_nq_init ( ip - > i_gl , LM_ST_SHARED , LM_FLAG_ANY , & i_gh ) ;
if ( error )
return error ;
2006-11-27 17:55:28 +03:00
}
2006-01-16 19:50:04 +03:00
2008-07-02 23:12:01 +04:00
if ( ( mask & MAY_WRITE ) & & IS_IMMUTABLE ( inode ) )
2016-08-02 14:58:28 +03:00
error = - EPERM ;
2008-07-02 23:12:01 +04:00
else
2021-01-21 16:19:24 +03:00
error = generic_permission ( & init_user_ns , inode , mask ) ;
2016-06-17 15:31:27 +03:00
if ( gfs2_holder_initialized ( & i_gh ) )
2006-01-16 19:50:04 +03:00
gfs2_glock_dq_uninit ( & i_gh ) ;
return error ;
}
2011-08-15 17:20:36 +04:00
static int __gfs2_setattr_simple ( struct inode * inode , struct iattr * attr )
2011-05-09 17:06:38 +04:00
{
2021-01-21 16:19:26 +03:00
setattr_copy ( & init_user_ns , inode , attr ) ;
2011-05-09 17:06:38 +04:00
mark_inode_dirty ( inode ) ;
return 0 ;
}
2021-04-07 01:59:03 +03:00
static int gfs2_setattr_simple ( struct inode * inode , struct iattr * attr )
2011-05-09 17:06:38 +04:00
{
int error ;
if ( current - > journal_info )
2011-08-15 17:20:36 +04:00
return __gfs2_setattr_simple ( inode , attr ) ;
2011-05-09 17:06:38 +04:00
2011-08-15 17:20:36 +04:00
error = gfs2_trans_begin ( GFS2_SB ( inode ) , RES_DINODE , 0 ) ;
2011-05-09 17:06:38 +04:00
if ( error )
return error ;
2011-08-15 17:20:36 +04:00
error = __gfs2_setattr_simple ( inode , attr ) ;
gfs2_trans_end ( GFS2_SB ( inode ) ) ;
2011-05-09 17:06:38 +04:00
return error ;
}
2006-01-16 19:50:04 +03:00
static int setattr_chown ( struct inode * inode , struct iattr * attr )
{
2006-06-14 23:32:57 +04:00
struct gfs2_inode * ip = GFS2_I ( inode ) ;
struct gfs2_sbd * sdp = GFS2_SB ( inode ) ;
2013-02-01 08:27:54 +04:00
kuid_t ouid , nuid ;
kgid_t ogid , ngid ;
2006-01-16 19:50:04 +03:00
int error ;
2015-03-18 20:03:41 +03:00
struct gfs2_alloc_parms ap ;
2006-01-16 19:50:04 +03:00
2006-11-01 21:23:29 +03:00
ouid = inode - > i_uid ;
ogid = inode - > i_gid ;
2006-01-16 19:50:04 +03:00
nuid = attr - > ia_uid ;
ngid = attr - > ia_gid ;
2013-02-01 09:56:13 +04:00
if ( ! ( attr - > ia_valid & ATTR_UID ) | | uid_eq ( ouid , nuid ) )
2013-02-01 05:49:26 +04:00
ouid = nuid = NO_UID_QUOTA_CHANGE ;
2013-02-01 09:56:13 +04:00
if ( ! ( attr - > ia_valid & ATTR_GID ) | | gid_eq ( ogid , ngid ) )
2013-02-01 05:49:26 +04:00
ogid = ngid = NO_GID_QUOTA_CHANGE ;
2020-02-27 21:47:53 +03:00
error = gfs2_qa_get ( ip ) ;
2014-01-07 02:16:01 +04:00
if ( error )
2020-02-27 21:47:53 +03:00
return error ;
2014-01-07 02:16:01 +04:00
error = gfs2_rindex_update ( sdp ) ;
if ( error )
goto out ;
error = gfs2_quota_lock ( ip , nuid , ngid ) ;
if ( error )
goto out ;
2015-03-18 20:03:41 +03:00
ap . target = gfs2_get_inode_blocks ( & ip - > i_inode ) ;
2013-02-01 09:56:13 +04:00
if ( ! uid_eq ( ouid , NO_UID_QUOTA_CHANGE ) | |
! gid_eq ( ogid , NO_GID_QUOTA_CHANGE ) ) {
2015-03-18 20:03:41 +03:00
error = gfs2_quota_check ( ip , nuid , ngid , & ap ) ;
2006-01-16 19:50:04 +03:00
if ( error )
goto out_gunlock_q ;
}
error = gfs2_trans_begin ( sdp , RES_DINODE + 2 * RES_QUOTA , 0 ) ;
if ( error )
goto out_gunlock_q ;
2011-08-15 17:20:36 +04:00
error = gfs2_setattr_simple ( inode , attr ) ;
2006-01-16 19:50:04 +03:00
if ( error )
goto out_end_trans ;
2013-02-01 09:56:13 +04:00
if ( ! uid_eq ( ouid , NO_UID_QUOTA_CHANGE ) | |
! gid_eq ( ogid , NO_GID_QUOTA_CHANGE ) ) {
2015-06-02 19:02:24 +03:00
gfs2_quota_change ( ip , - ( s64 ) ap . target , ouid , ogid ) ;
2015-03-18 20:03:41 +03:00
gfs2_quota_change ( ip , ap . target , nuid , ngid ) ;
2006-01-16 19:50:04 +03:00
}
2006-09-04 20:04:26 +04:00
out_end_trans :
2006-01-16 19:50:04 +03:00
gfs2_trans_end ( sdp ) ;
2006-09-04 20:04:26 +04:00
out_gunlock_q :
2006-01-16 19:50:04 +03:00
gfs2_quota_unlock ( ip ) ;
2014-01-07 02:16:01 +04:00
out :
2020-02-27 21:47:53 +03:00
gfs2_qa_put ( ip ) ;
2006-01-16 19:50:04 +03:00
return error ;
}
/**
* gfs2_setattr - Change attributes on an inode
2021-03-30 19:44:29 +03:00
* @ mnt_userns : User namespace of the mount the inode was found from
2006-01-16 19:50:04 +03:00
* @ dentry : The dentry which is changing
* @ attr : The structure describing the change
*
* The VFS layer wants to change one or more of an inodes attributes . Write
* that change out to disk .
*
* Returns : errno
*/
2021-01-21 16:19:43 +03:00
static int gfs2_setattr ( struct user_namespace * mnt_userns ,
struct dentry * dentry , struct iattr * attr )
2006-01-16 19:50:04 +03:00
{
2015-03-18 01:25:59 +03:00
struct inode * inode = d_inode ( dentry ) ;
2006-06-14 23:32:57 +04:00
struct gfs2_inode * ip = GFS2_I ( inode ) ;
2006-01-16 19:50:04 +03:00
struct gfs2_holder i_gh ;
int error ;
2020-02-27 21:47:53 +03:00
error = gfs2_qa_get ( ip ) ;
2012-06-06 14:17:59 +04:00
if ( error )
return error ;
2006-01-16 19:50:04 +03:00
error = gfs2_glock_nq_init ( ip - > i_gl , LM_ST_EXCLUSIVE , 0 , & i_gh ) ;
if ( error )
2020-02-27 21:47:53 +03:00
goto out ;
2006-01-16 19:50:04 +03:00
2021-07-28 15:47:34 +03:00
error = may_setattr ( & init_user_ns , inode , attr - > ia_valid ) ;
if ( error )
2020-02-27 21:47:53 +03:00
goto error ;
2006-01-16 19:50:04 +03:00
2021-01-21 16:19:26 +03:00
error = setattr_prepare ( & init_user_ns , dentry , attr ) ;
2006-01-16 19:50:04 +03:00
if ( error )
2020-02-27 21:47:53 +03:00
goto error ;
2006-01-16 19:50:04 +03:00
if ( attr - > ia_valid & ATTR_SIZE )
2010-08-11 12:37:53 +04:00
error = gfs2_setattr_size ( inode , attr - > ia_size ) ;
2006-01-16 19:50:04 +03:00
else if ( attr - > ia_valid & ( ATTR_UID | ATTR_GID ) )
error = setattr_chown ( inode , attr ) ;
2013-12-20 17:16:52 +04:00
else {
2011-08-15 17:20:36 +04:00
error = gfs2_setattr_simple ( inode , attr ) ;
2013-12-20 17:16:52 +04:00
if ( ! error & & attr - > ia_valid & ATTR_MODE )
2021-01-21 16:19:27 +03:00
error = posix_acl_chmod ( & init_user_ns , inode ,
inode - > i_mode ) ;
2013-12-20 17:16:52 +04:00
}
2006-01-16 19:50:04 +03:00
2020-02-27 21:47:53 +03:00
error :
2006-01-16 19:50:04 +03:00
if ( ! error )
mark_inode_dirty ( inode ) ;
2011-08-15 17:20:36 +04:00
gfs2_glock_dq_uninit ( & i_gh ) ;
2020-02-27 21:47:53 +03:00
out :
gfs2_qa_put ( ip ) ;
2006-01-16 19:50:04 +03:00
return error ;
}
/**
* gfs2_getattr - Read out an inode ' s attributes
2021-01-21 16:19:43 +03:00
* @ mnt_userns : user namespace of the mount the inode was found from
statx: Add a system call to make enhanced file info available
Add a system call to make extended file information available, including
file creation and some attribute flags where available through the
underlying filesystem.
The getattr inode operation is altered to take two additional arguments: a
u32 request_mask and an unsigned int flags that indicate the
synchronisation mode. This change is propagated to the vfs_getattr*()
function.
Functions like vfs_stat() are now inline wrappers around new functions
vfs_statx() and vfs_statx_fd() to reduce stack usage.
========
OVERVIEW
========
The idea was initially proposed as a set of xattrs that could be retrieved
with getxattr(), but the general preference proved to be for a new syscall
with an extended stat structure.
A number of requests were gathered for features to be included. The
following have been included:
(1) Make the fields a consistent size on all arches and make them large.
(2) Spare space, request flags and information flags are provided for
future expansion.
(3) Better support for the y2038 problem [Arnd Bergmann] (tv_sec is an
__s64).
(4) Creation time: The SMB protocol carries the creation time, which could
be exported by Samba, which will in turn help CIFS make use of
FS-Cache as that can be used for coherency data (stx_btime).
This is also specified in NFSv4 as a recommended attribute and could
be exported by NFSD [Steve French].
(5) Lightweight stat: Ask for just those details of interest, and allow a
netfs (such as NFS) to approximate anything not of interest, possibly
without going to the server [Trond Myklebust, Ulrich Drepper, Andreas
Dilger] (AT_STATX_DONT_SYNC).
(6) Heavyweight stat: Force a netfs to go to the server, even if it thinks
its cached attributes are up to date [Trond Myklebust]
(AT_STATX_FORCE_SYNC).
And the following have been left out for future extension:
(7) Data version number: Could be used by userspace NFS servers [Aneesh
Kumar].
Can also be used to modify fill_post_wcc() in NFSD which retrieves
i_version directly, but has just called vfs_getattr(). It could get
it from the kstat struct if it used vfs_xgetattr() instead.
(There's disagreement on the exact semantics of a single field, since
not all filesystems do this the same way).
(8) BSD stat compatibility: Including more fields from the BSD stat such
as creation time (st_btime) and inode generation number (st_gen)
[Jeremy Allison, Bernd Schubert].
(9) Inode generation number: Useful for FUSE and userspace NFS servers
[Bernd Schubert].
(This was asked for but later deemed unnecessary with the
open-by-handle capability available and caused disagreement as to
whether it's a security hole or not).
(10) Extra coherency data may be useful in making backups [Andreas Dilger].
(No particular data were offered, but things like last backup
timestamp, the data version number and the DOS archive bit would come
into this category).
(11) Allow the filesystem to indicate what it can/cannot provide: A
filesystem can now say it doesn't support a standard stat feature if
that isn't available, so if, for instance, inode numbers or UIDs don't
exist or are fabricated locally...
(This requires a separate system call - I have an fsinfo() call idea
for this).
(12) Store a 16-byte volume ID in the superblock that can be returned in
struct xstat [Steve French].
(Deferred to fsinfo).
(13) Include granularity fields in the time data to indicate the
granularity of each of the times (NFSv4 time_delta) [Steve French].
(Deferred to fsinfo).
(14) FS_IOC_GETFLAGS value. These could be translated to BSD's st_flags.
Note that the Linux IOC flags are a mess and filesystems such as Ext4
define flags that aren't in linux/fs.h, so translation in the kernel
may be a necessity (or, possibly, we provide the filesystem type too).
(Some attributes are made available in stx_attributes, but the general
feeling was that the IOC flags were too ext[234]-specific and shouldn't
be exposed through statx this way).
(15) Mask of features available on file (eg: ACLs, seclabel) [Brad Boyer,
Michael Kerrisk].
(Deferred, probably to fsinfo. Finding out if there's an ACL or
seclabel might require extra filesystem operations).
(16) Femtosecond-resolution timestamps [Dave Chinner].
(A __reserved field has been left in the statx_timestamp struct for
this - if there proves to be a need).
(17) A set multiple attributes syscall to go with this.
===============
NEW SYSTEM CALL
===============
The new system call is:
int ret = statx(int dfd,
const char *filename,
unsigned int flags,
unsigned int mask,
struct statx *buffer);
The dfd, filename and flags parameters indicate the file to query, in a
similar way to fstatat(). There is no equivalent of lstat() as that can be
emulated with statx() by passing AT_SYMLINK_NOFOLLOW in flags. There is
also no equivalent of fstat() as that can be emulated by passing a NULL
filename to statx() with the fd of interest in dfd.
Whether or not statx() synchronises the attributes with the backing store
can be controlled by OR'ing a value into the flags argument (this typically
only affects network filesystems):
(1) AT_STATX_SYNC_AS_STAT tells statx() to behave as stat() does in this
respect.
(2) AT_STATX_FORCE_SYNC will require a network filesystem to synchronise
its attributes with the server - which might require data writeback to
occur to get the timestamps correct.
(3) AT_STATX_DONT_SYNC will suppress synchronisation with the server in a
network filesystem. The resulting values should be considered
approximate.
mask is a bitmask indicating the fields in struct statx that are of
interest to the caller. The user should set this to STATX_BASIC_STATS to
get the basic set returned by stat(). It should be noted that asking for
more information may entail extra I/O operations.
buffer points to the destination for the data. This must be 256 bytes in
size.
======================
MAIN ATTRIBUTES RECORD
======================
The following structures are defined in which to return the main attribute
set:
struct statx_timestamp {
__s64 tv_sec;
__s32 tv_nsec;
__s32 __reserved;
};
struct statx {
__u32 stx_mask;
__u32 stx_blksize;
__u64 stx_attributes;
__u32 stx_nlink;
__u32 stx_uid;
__u32 stx_gid;
__u16 stx_mode;
__u16 __spare0[1];
__u64 stx_ino;
__u64 stx_size;
__u64 stx_blocks;
__u64 __spare1[1];
struct statx_timestamp stx_atime;
struct statx_timestamp stx_btime;
struct statx_timestamp stx_ctime;
struct statx_timestamp stx_mtime;
__u32 stx_rdev_major;
__u32 stx_rdev_minor;
__u32 stx_dev_major;
__u32 stx_dev_minor;
__u64 __spare2[14];
};
The defined bits in request_mask and stx_mask are:
STATX_TYPE Want/got stx_mode & S_IFMT
STATX_MODE Want/got stx_mode & ~S_IFMT
STATX_NLINK Want/got stx_nlink
STATX_UID Want/got stx_uid
STATX_GID Want/got stx_gid
STATX_ATIME Want/got stx_atime{,_ns}
STATX_MTIME Want/got stx_mtime{,_ns}
STATX_CTIME Want/got stx_ctime{,_ns}
STATX_INO Want/got stx_ino
STATX_SIZE Want/got stx_size
STATX_BLOCKS Want/got stx_blocks
STATX_BASIC_STATS [The stuff in the normal stat struct]
STATX_BTIME Want/got stx_btime{,_ns}
STATX_ALL [All currently available stuff]
stx_btime is the file creation time, stx_mask is a bitmask indicating the
data provided and __spares*[] are where as-yet undefined fields can be
placed.
Time fields are structures with separate seconds and nanoseconds fields
plus a reserved field in case we want to add even finer resolution. Note
that times will be negative if before 1970; in such a case, the nanosecond
fields will also be negative if not zero.
The bits defined in the stx_attributes field convey information about a
file, how it is accessed, where it is and what it does. The following
attributes map to FS_*_FL flags and are the same numerical value:
STATX_ATTR_COMPRESSED File is compressed by the fs
STATX_ATTR_IMMUTABLE File is marked immutable
STATX_ATTR_APPEND File is append-only
STATX_ATTR_NODUMP File is not to be dumped
STATX_ATTR_ENCRYPTED File requires key to decrypt in fs
Within the kernel, the supported flags are listed by:
KSTAT_ATTR_FS_IOC_FLAGS
[Are any other IOC flags of sufficient general interest to be exposed
through this interface?]
New flags include:
STATX_ATTR_AUTOMOUNT Object is an automount trigger
These are for the use of GUI tools that might want to mark files specially,
depending on what they are.
Fields in struct statx come in a number of classes:
(0) stx_dev_*, stx_blksize.
These are local system information and are always available.
(1) stx_mode, stx_nlink, stx_uid, stx_gid, stx_[amc]time, stx_ino,
stx_size, stx_blocks.
These will be returned whether the caller asks for them or not. The
corresponding bits in stx_mask will be set to indicate whether they
actually have valid values.
If the caller didn't ask for them, then they may be approximated. For
example, NFS won't waste any time updating them from the server,
unless as a byproduct of updating something requested.
If the values don't actually exist for the underlying object (such as
UID or GID on a DOS file), then the bit won't be set in the stx_mask,
even if the caller asked for the value. In such a case, the returned
value will be a fabrication.
Note that there are instances where the type might not be valid, for
instance Windows reparse points.
(2) stx_rdev_*.
This will be set only if stx_mode indicates we're looking at a
blockdev or a chardev, otherwise will be 0.
(3) stx_btime.
Similar to (1), except this will be set to 0 if it doesn't exist.
=======
TESTING
=======
The following test program can be used to test the statx system call:
samples/statx/test-statx.c
Just compile and run, passing it paths to the files you want to examine.
The file is built automatically if CONFIG_SAMPLES is enabled.
Here's some example output. Firstly, an NFS directory that crosses to
another FSID. Note that the AUTOMOUNT attribute is set because transiting
this directory will cause d_automount to be invoked by the VFS.
[root@andromeda ~]# /tmp/test-statx -A /warthog/data
statx(/warthog/data) = 0
results=7ff
Size: 4096 Blocks: 8 IO Block: 1048576 directory
Device: 00:26 Inode: 1703937 Links: 125
Access: (3777/drwxrwxrwx) Uid: 0 Gid: 4041
Access: 2016-11-24 09:02:12.219699527+0000
Modify: 2016-11-17 10:44:36.225653653+0000
Change: 2016-11-17 10:44:36.225653653+0000
Attributes: 0000000000001000 (-------- -------- -------- -------- -------- -------- ---m---- --------)
Secondly, the result of automounting on that directory.
[root@andromeda ~]# /tmp/test-statx /warthog/data
statx(/warthog/data) = 0
results=7ff
Size: 4096 Blocks: 8 IO Block: 1048576 directory
Device: 00:27 Inode: 2 Links: 125
Access: (3777/drwxrwxrwx) Uid: 0 Gid: 4041
Access: 2016-11-24 09:02:12.219699527+0000
Modify: 2016-11-17 10:44:36.225653653+0000
Change: 2016-11-17 10:44:36.225653653+0000
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
2017-01-31 19:46:22 +03:00
* @ path : Object to query
2006-01-16 19:50:04 +03:00
* @ stat : The inode ' s stats
statx: Add a system call to make enhanced file info available
Add a system call to make extended file information available, including
file creation and some attribute flags where available through the
underlying filesystem.
The getattr inode operation is altered to take two additional arguments: a
u32 request_mask and an unsigned int flags that indicate the
synchronisation mode. This change is propagated to the vfs_getattr*()
function.
Functions like vfs_stat() are now inline wrappers around new functions
vfs_statx() and vfs_statx_fd() to reduce stack usage.
========
OVERVIEW
========
The idea was initially proposed as a set of xattrs that could be retrieved
with getxattr(), but the general preference proved to be for a new syscall
with an extended stat structure.
A number of requests were gathered for features to be included. The
following have been included:
(1) Make the fields a consistent size on all arches and make them large.
(2) Spare space, request flags and information flags are provided for
future expansion.
(3) Better support for the y2038 problem [Arnd Bergmann] (tv_sec is an
__s64).
(4) Creation time: The SMB protocol carries the creation time, which could
be exported by Samba, which will in turn help CIFS make use of
FS-Cache as that can be used for coherency data (stx_btime).
This is also specified in NFSv4 as a recommended attribute and could
be exported by NFSD [Steve French].
(5) Lightweight stat: Ask for just those details of interest, and allow a
netfs (such as NFS) to approximate anything not of interest, possibly
without going to the server [Trond Myklebust, Ulrich Drepper, Andreas
Dilger] (AT_STATX_DONT_SYNC).
(6) Heavyweight stat: Force a netfs to go to the server, even if it thinks
its cached attributes are up to date [Trond Myklebust]
(AT_STATX_FORCE_SYNC).
And the following have been left out for future extension:
(7) Data version number: Could be used by userspace NFS servers [Aneesh
Kumar].
Can also be used to modify fill_post_wcc() in NFSD which retrieves
i_version directly, but has just called vfs_getattr(). It could get
it from the kstat struct if it used vfs_xgetattr() instead.
(There's disagreement on the exact semantics of a single field, since
not all filesystems do this the same way).
(8) BSD stat compatibility: Including more fields from the BSD stat such
as creation time (st_btime) and inode generation number (st_gen)
[Jeremy Allison, Bernd Schubert].
(9) Inode generation number: Useful for FUSE and userspace NFS servers
[Bernd Schubert].
(This was asked for but later deemed unnecessary with the
open-by-handle capability available and caused disagreement as to
whether it's a security hole or not).
(10) Extra coherency data may be useful in making backups [Andreas Dilger].
(No particular data were offered, but things like last backup
timestamp, the data version number and the DOS archive bit would come
into this category).
(11) Allow the filesystem to indicate what it can/cannot provide: A
filesystem can now say it doesn't support a standard stat feature if
that isn't available, so if, for instance, inode numbers or UIDs don't
exist or are fabricated locally...
(This requires a separate system call - I have an fsinfo() call idea
for this).
(12) Store a 16-byte volume ID in the superblock that can be returned in
struct xstat [Steve French].
(Deferred to fsinfo).
(13) Include granularity fields in the time data to indicate the
granularity of each of the times (NFSv4 time_delta) [Steve French].
(Deferred to fsinfo).
(14) FS_IOC_GETFLAGS value. These could be translated to BSD's st_flags.
Note that the Linux IOC flags are a mess and filesystems such as Ext4
define flags that aren't in linux/fs.h, so translation in the kernel
may be a necessity (or, possibly, we provide the filesystem type too).
(Some attributes are made available in stx_attributes, but the general
feeling was that the IOC flags were too ext[234]-specific and shouldn't
be exposed through statx this way).
(15) Mask of features available on file (eg: ACLs, seclabel) [Brad Boyer,
Michael Kerrisk].
(Deferred, probably to fsinfo. Finding out if there's an ACL or
seclabel might require extra filesystem operations).
(16) Femtosecond-resolution timestamps [Dave Chinner].
(A __reserved field has been left in the statx_timestamp struct for
this - if there proves to be a need).
(17) A set multiple attributes syscall to go with this.
===============
NEW SYSTEM CALL
===============
The new system call is:
int ret = statx(int dfd,
const char *filename,
unsigned int flags,
unsigned int mask,
struct statx *buffer);
The dfd, filename and flags parameters indicate the file to query, in a
similar way to fstatat(). There is no equivalent of lstat() as that can be
emulated with statx() by passing AT_SYMLINK_NOFOLLOW in flags. There is
also no equivalent of fstat() as that can be emulated by passing a NULL
filename to statx() with the fd of interest in dfd.
Whether or not statx() synchronises the attributes with the backing store
can be controlled by OR'ing a value into the flags argument (this typically
only affects network filesystems):
(1) AT_STATX_SYNC_AS_STAT tells statx() to behave as stat() does in this
respect.
(2) AT_STATX_FORCE_SYNC will require a network filesystem to synchronise
its attributes with the server - which might require data writeback to
occur to get the timestamps correct.
(3) AT_STATX_DONT_SYNC will suppress synchronisation with the server in a
network filesystem. The resulting values should be considered
approximate.
mask is a bitmask indicating the fields in struct statx that are of
interest to the caller. The user should set this to STATX_BASIC_STATS to
get the basic set returned by stat(). It should be noted that asking for
more information may entail extra I/O operations.
buffer points to the destination for the data. This must be 256 bytes in
size.
======================
MAIN ATTRIBUTES RECORD
======================
The following structures are defined in which to return the main attribute
set:
struct statx_timestamp {
__s64 tv_sec;
__s32 tv_nsec;
__s32 __reserved;
};
struct statx {
__u32 stx_mask;
__u32 stx_blksize;
__u64 stx_attributes;
__u32 stx_nlink;
__u32 stx_uid;
__u32 stx_gid;
__u16 stx_mode;
__u16 __spare0[1];
__u64 stx_ino;
__u64 stx_size;
__u64 stx_blocks;
__u64 __spare1[1];
struct statx_timestamp stx_atime;
struct statx_timestamp stx_btime;
struct statx_timestamp stx_ctime;
struct statx_timestamp stx_mtime;
__u32 stx_rdev_major;
__u32 stx_rdev_minor;
__u32 stx_dev_major;
__u32 stx_dev_minor;
__u64 __spare2[14];
};
The defined bits in request_mask and stx_mask are:
STATX_TYPE Want/got stx_mode & S_IFMT
STATX_MODE Want/got stx_mode & ~S_IFMT
STATX_NLINK Want/got stx_nlink
STATX_UID Want/got stx_uid
STATX_GID Want/got stx_gid
STATX_ATIME Want/got stx_atime{,_ns}
STATX_MTIME Want/got stx_mtime{,_ns}
STATX_CTIME Want/got stx_ctime{,_ns}
STATX_INO Want/got stx_ino
STATX_SIZE Want/got stx_size
STATX_BLOCKS Want/got stx_blocks
STATX_BASIC_STATS [The stuff in the normal stat struct]
STATX_BTIME Want/got stx_btime{,_ns}
STATX_ALL [All currently available stuff]
stx_btime is the file creation time, stx_mask is a bitmask indicating the
data provided and __spares*[] are where as-yet undefined fields can be
placed.
Time fields are structures with separate seconds and nanoseconds fields
plus a reserved field in case we want to add even finer resolution. Note
that times will be negative if before 1970; in such a case, the nanosecond
fields will also be negative if not zero.
The bits defined in the stx_attributes field convey information about a
file, how it is accessed, where it is and what it does. The following
attributes map to FS_*_FL flags and are the same numerical value:
STATX_ATTR_COMPRESSED File is compressed by the fs
STATX_ATTR_IMMUTABLE File is marked immutable
STATX_ATTR_APPEND File is append-only
STATX_ATTR_NODUMP File is not to be dumped
STATX_ATTR_ENCRYPTED File requires key to decrypt in fs
Within the kernel, the supported flags are listed by:
KSTAT_ATTR_FS_IOC_FLAGS
[Are any other IOC flags of sufficient general interest to be exposed
through this interface?]
New flags include:
STATX_ATTR_AUTOMOUNT Object is an automount trigger
These are for the use of GUI tools that might want to mark files specially,
depending on what they are.
Fields in struct statx come in a number of classes:
(0) stx_dev_*, stx_blksize.
These are local system information and are always available.
(1) stx_mode, stx_nlink, stx_uid, stx_gid, stx_[amc]time, stx_ino,
stx_size, stx_blocks.
These will be returned whether the caller asks for them or not. The
corresponding bits in stx_mask will be set to indicate whether they
actually have valid values.
If the caller didn't ask for them, then they may be approximated. For
example, NFS won't waste any time updating them from the server,
unless as a byproduct of updating something requested.
If the values don't actually exist for the underlying object (such as
UID or GID on a DOS file), then the bit won't be set in the stx_mask,
even if the caller asked for the value. In such a case, the returned
value will be a fabrication.
Note that there are instances where the type might not be valid, for
instance Windows reparse points.
(2) stx_rdev_*.
This will be set only if stx_mode indicates we're looking at a
blockdev or a chardev, otherwise will be 0.
(3) stx_btime.
Similar to (1), except this will be set to 0 if it doesn't exist.
=======
TESTING
=======
The following test program can be used to test the statx system call:
samples/statx/test-statx.c
Just compile and run, passing it paths to the files you want to examine.
The file is built automatically if CONFIG_SAMPLES is enabled.
Here's some example output. Firstly, an NFS directory that crosses to
another FSID. Note that the AUTOMOUNT attribute is set because transiting
this directory will cause d_automount to be invoked by the VFS.
[root@andromeda ~]# /tmp/test-statx -A /warthog/data
statx(/warthog/data) = 0
results=7ff
Size: 4096 Blocks: 8 IO Block: 1048576 directory
Device: 00:26 Inode: 1703937 Links: 125
Access: (3777/drwxrwxrwx) Uid: 0 Gid: 4041
Access: 2016-11-24 09:02:12.219699527+0000
Modify: 2016-11-17 10:44:36.225653653+0000
Change: 2016-11-17 10:44:36.225653653+0000
Attributes: 0000000000001000 (-------- -------- -------- -------- -------- -------- ---m---- --------)
Secondly, the result of automounting on that directory.
[root@andromeda ~]# /tmp/test-statx /warthog/data
statx(/warthog/data) = 0
results=7ff
Size: 4096 Blocks: 8 IO Block: 1048576 directory
Device: 00:27 Inode: 2 Links: 125
Access: (3777/drwxrwxrwx) Uid: 0 Gid: 4041
Access: 2016-11-24 09:02:12.219699527+0000
Modify: 2016-11-17 10:44:36.225653653+0000
Change: 2016-11-17 10:44:36.225653653+0000
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
2017-01-31 19:46:22 +03:00
* @ request_mask : Mask of STATX_xxx flags indicating the caller ' s interests
* @ flags : AT_STATX_xxx setting
2006-01-16 19:50:04 +03:00
*
2006-11-27 18:12:05 +03:00
* This may be called from the VFS directly , or from within GFS2 with the
* inode locked , so we look to see if the glock is already locked and only
* lock the glock if it hasn't already been done . Note that it's the NFS
* readdirplus operation which causes this to be called ( from filldir )
* with the glock already held .
*
2006-01-16 19:50:04 +03:00
* Returns : errno
*/
2021-01-21 16:19:43 +03:00
static int gfs2_getattr ( struct user_namespace * mnt_userns ,
const struct path * path , struct kstat * stat ,
statx: Add a system call to make enhanced file info available
Add a system call to make extended file information available, including
file creation and some attribute flags where available through the
underlying filesystem.
The getattr inode operation is altered to take two additional arguments: a
u32 request_mask and an unsigned int flags that indicate the
synchronisation mode. This change is propagated to the vfs_getattr*()
function.
Functions like vfs_stat() are now inline wrappers around new functions
vfs_statx() and vfs_statx_fd() to reduce stack usage.
========
OVERVIEW
========
The idea was initially proposed as a set of xattrs that could be retrieved
with getxattr(), but the general preference proved to be for a new syscall
with an extended stat structure.
A number of requests were gathered for features to be included. The
following have been included:
(1) Make the fields a consistent size on all arches and make them large.
(2) Spare space, request flags and information flags are provided for
future expansion.
(3) Better support for the y2038 problem [Arnd Bergmann] (tv_sec is an
__s64).
(4) Creation time: The SMB protocol carries the creation time, which could
be exported by Samba, which will in turn help CIFS make use of
FS-Cache as that can be used for coherency data (stx_btime).
This is also specified in NFSv4 as a recommended attribute and could
be exported by NFSD [Steve French].
(5) Lightweight stat: Ask for just those details of interest, and allow a
netfs (such as NFS) to approximate anything not of interest, possibly
without going to the server [Trond Myklebust, Ulrich Drepper, Andreas
Dilger] (AT_STATX_DONT_SYNC).
(6) Heavyweight stat: Force a netfs to go to the server, even if it thinks
its cached attributes are up to date [Trond Myklebust]
(AT_STATX_FORCE_SYNC).
And the following have been left out for future extension:
(7) Data version number: Could be used by userspace NFS servers [Aneesh
Kumar].
Can also be used to modify fill_post_wcc() in NFSD which retrieves
i_version directly, but has just called vfs_getattr(). It could get
it from the kstat struct if it used vfs_xgetattr() instead.
(There's disagreement on the exact semantics of a single field, since
not all filesystems do this the same way).
(8) BSD stat compatibility: Including more fields from the BSD stat such
as creation time (st_btime) and inode generation number (st_gen)
[Jeremy Allison, Bernd Schubert].
(9) Inode generation number: Useful for FUSE and userspace NFS servers
[Bernd Schubert].
(This was asked for but later deemed unnecessary with the
open-by-handle capability available and caused disagreement as to
whether it's a security hole or not).
(10) Extra coherency data may be useful in making backups [Andreas Dilger].
(No particular data were offered, but things like last backup
timestamp, the data version number and the DOS archive bit would come
into this category).
(11) Allow the filesystem to indicate what it can/cannot provide: A
filesystem can now say it doesn't support a standard stat feature if
that isn't available, so if, for instance, inode numbers or UIDs don't
exist or are fabricated locally...
(This requires a separate system call - I have an fsinfo() call idea
for this).
(12) Store a 16-byte volume ID in the superblock that can be returned in
struct xstat [Steve French].
(Deferred to fsinfo).
(13) Include granularity fields in the time data to indicate the
granularity of each of the times (NFSv4 time_delta) [Steve French].
(Deferred to fsinfo).
(14) FS_IOC_GETFLAGS value. These could be translated to BSD's st_flags.
Note that the Linux IOC flags are a mess and filesystems such as Ext4
define flags that aren't in linux/fs.h, so translation in the kernel
may be a necessity (or, possibly, we provide the filesystem type too).
(Some attributes are made available in stx_attributes, but the general
feeling was that the IOC flags were too ext[234]-specific and shouldn't
be exposed through statx this way).
(15) Mask of features available on file (eg: ACLs, seclabel) [Brad Boyer,
Michael Kerrisk].
(Deferred, probably to fsinfo. Finding out if there's an ACL or
seclabel might require extra filesystem operations).
(16) Femtosecond-resolution timestamps [Dave Chinner].
(A __reserved field has been left in the statx_timestamp struct for
this - if there proves to be a need).
(17) A set multiple attributes syscall to go with this.
===============
NEW SYSTEM CALL
===============
The new system call is:
int ret = statx(int dfd,
const char *filename,
unsigned int flags,
unsigned int mask,
struct statx *buffer);
The dfd, filename and flags parameters indicate the file to query, in a
similar way to fstatat(). There is no equivalent of lstat() as that can be
emulated with statx() by passing AT_SYMLINK_NOFOLLOW in flags. There is
also no equivalent of fstat() as that can be emulated by passing a NULL
filename to statx() with the fd of interest in dfd.
Whether or not statx() synchronises the attributes with the backing store
can be controlled by OR'ing a value into the flags argument (this typically
only affects network filesystems):
(1) AT_STATX_SYNC_AS_STAT tells statx() to behave as stat() does in this
respect.
(2) AT_STATX_FORCE_SYNC will require a network filesystem to synchronise
its attributes with the server - which might require data writeback to
occur to get the timestamps correct.
(3) AT_STATX_DONT_SYNC will suppress synchronisation with the server in a
network filesystem. The resulting values should be considered
approximate.
mask is a bitmask indicating the fields in struct statx that are of
interest to the caller. The user should set this to STATX_BASIC_STATS to
get the basic set returned by stat(). It should be noted that asking for
more information may entail extra I/O operations.
buffer points to the destination for the data. This must be 256 bytes in
size.
======================
MAIN ATTRIBUTES RECORD
======================
The following structures are defined in which to return the main attribute
set:
struct statx_timestamp {
__s64 tv_sec;
__s32 tv_nsec;
__s32 __reserved;
};
struct statx {
__u32 stx_mask;
__u32 stx_blksize;
__u64 stx_attributes;
__u32 stx_nlink;
__u32 stx_uid;
__u32 stx_gid;
__u16 stx_mode;
__u16 __spare0[1];
__u64 stx_ino;
__u64 stx_size;
__u64 stx_blocks;
__u64 __spare1[1];
struct statx_timestamp stx_atime;
struct statx_timestamp stx_btime;
struct statx_timestamp stx_ctime;
struct statx_timestamp stx_mtime;
__u32 stx_rdev_major;
__u32 stx_rdev_minor;
__u32 stx_dev_major;
__u32 stx_dev_minor;
__u64 __spare2[14];
};
The defined bits in request_mask and stx_mask are:
STATX_TYPE Want/got stx_mode & S_IFMT
STATX_MODE Want/got stx_mode & ~S_IFMT
STATX_NLINK Want/got stx_nlink
STATX_UID Want/got stx_uid
STATX_GID Want/got stx_gid
STATX_ATIME Want/got stx_atime{,_ns}
STATX_MTIME Want/got stx_mtime{,_ns}
STATX_CTIME Want/got stx_ctime{,_ns}
STATX_INO Want/got stx_ino
STATX_SIZE Want/got stx_size
STATX_BLOCKS Want/got stx_blocks
STATX_BASIC_STATS [The stuff in the normal stat struct]
STATX_BTIME Want/got stx_btime{,_ns}
STATX_ALL [All currently available stuff]
stx_btime is the file creation time, stx_mask is a bitmask indicating the
data provided and __spares*[] are where as-yet undefined fields can be
placed.
Time fields are structures with separate seconds and nanoseconds fields
plus a reserved field in case we want to add even finer resolution. Note
that times will be negative if before 1970; in such a case, the nanosecond
fields will also be negative if not zero.
The bits defined in the stx_attributes field convey information about a
file, how it is accessed, where it is and what it does. The following
attributes map to FS_*_FL flags and are the same numerical value:
STATX_ATTR_COMPRESSED File is compressed by the fs
STATX_ATTR_IMMUTABLE File is marked immutable
STATX_ATTR_APPEND File is append-only
STATX_ATTR_NODUMP File is not to be dumped
STATX_ATTR_ENCRYPTED File requires key to decrypt in fs
Within the kernel, the supported flags are listed by:
KSTAT_ATTR_FS_IOC_FLAGS
[Are any other IOC flags of sufficient general interest to be exposed
through this interface?]
New flags include:
STATX_ATTR_AUTOMOUNT Object is an automount trigger
These are for the use of GUI tools that might want to mark files specially,
depending on what they are.
Fields in struct statx come in a number of classes:
(0) stx_dev_*, stx_blksize.
These are local system information and are always available.
(1) stx_mode, stx_nlink, stx_uid, stx_gid, stx_[amc]time, stx_ino,
stx_size, stx_blocks.
These will be returned whether the caller asks for them or not. The
corresponding bits in stx_mask will be set to indicate whether they
actually have valid values.
If the caller didn't ask for them, then they may be approximated. For
example, NFS won't waste any time updating them from the server,
unless as a byproduct of updating something requested.
If the values don't actually exist for the underlying object (such as
UID or GID on a DOS file), then the bit won't be set in the stx_mask,
even if the caller asked for the value. In such a case, the returned
value will be a fabrication.
Note that there are instances where the type might not be valid, for
instance Windows reparse points.
(2) stx_rdev_*.
This will be set only if stx_mode indicates we're looking at a
blockdev or a chardev, otherwise will be 0.
(3) stx_btime.
Similar to (1), except this will be set to 0 if it doesn't exist.
=======
TESTING
=======
The following test program can be used to test the statx system call:
samples/statx/test-statx.c
Just compile and run, passing it paths to the files you want to examine.
The file is built automatically if CONFIG_SAMPLES is enabled.
Here's some example output. Firstly, an NFS directory that crosses to
another FSID. Note that the AUTOMOUNT attribute is set because transiting
this directory will cause d_automount to be invoked by the VFS.
[root@andromeda ~]# /tmp/test-statx -A /warthog/data
statx(/warthog/data) = 0
results=7ff
Size: 4096 Blocks: 8 IO Block: 1048576 directory
Device: 00:26 Inode: 1703937 Links: 125
Access: (3777/drwxrwxrwx) Uid: 0 Gid: 4041
Access: 2016-11-24 09:02:12.219699527+0000
Modify: 2016-11-17 10:44:36.225653653+0000
Change: 2016-11-17 10:44:36.225653653+0000
Attributes: 0000000000001000 (-------- -------- -------- -------- -------- -------- ---m---- --------)
Secondly, the result of automounting on that directory.
[root@andromeda ~]# /tmp/test-statx /warthog/data
statx(/warthog/data) = 0
results=7ff
Size: 4096 Blocks: 8 IO Block: 1048576 directory
Device: 00:27 Inode: 2 Links: 125
Access: (3777/drwxrwxrwx) Uid: 0 Gid: 4041
Access: 2016-11-24 09:02:12.219699527+0000
Modify: 2016-11-17 10:44:36.225653653+0000
Change: 2016-11-17 10:44:36.225653653+0000
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
2017-01-31 19:46:22 +03:00
u32 request_mask , unsigned int flags )
2006-01-16 19:50:04 +03:00
{
statx: Add a system call to make enhanced file info available
Add a system call to make extended file information available, including
file creation and some attribute flags where available through the
underlying filesystem.
The getattr inode operation is altered to take two additional arguments: a
u32 request_mask and an unsigned int flags that indicate the
synchronisation mode. This change is propagated to the vfs_getattr*()
function.
Functions like vfs_stat() are now inline wrappers around new functions
vfs_statx() and vfs_statx_fd() to reduce stack usage.
========
OVERVIEW
========
The idea was initially proposed as a set of xattrs that could be retrieved
with getxattr(), but the general preference proved to be for a new syscall
with an extended stat structure.
A number of requests were gathered for features to be included. The
following have been included:
(1) Make the fields a consistent size on all arches and make them large.
(2) Spare space, request flags and information flags are provided for
future expansion.
(3) Better support for the y2038 problem [Arnd Bergmann] (tv_sec is an
__s64).
(4) Creation time: The SMB protocol carries the creation time, which could
be exported by Samba, which will in turn help CIFS make use of
FS-Cache as that can be used for coherency data (stx_btime).
This is also specified in NFSv4 as a recommended attribute and could
be exported by NFSD [Steve French].
(5) Lightweight stat: Ask for just those details of interest, and allow a
netfs (such as NFS) to approximate anything not of interest, possibly
without going to the server [Trond Myklebust, Ulrich Drepper, Andreas
Dilger] (AT_STATX_DONT_SYNC).
(6) Heavyweight stat: Force a netfs to go to the server, even if it thinks
its cached attributes are up to date [Trond Myklebust]
(AT_STATX_FORCE_SYNC).
And the following have been left out for future extension:
(7) Data version number: Could be used by userspace NFS servers [Aneesh
Kumar].
Can also be used to modify fill_post_wcc() in NFSD which retrieves
i_version directly, but has just called vfs_getattr(). It could get
it from the kstat struct if it used vfs_xgetattr() instead.
(There's disagreement on the exact semantics of a single field, since
not all filesystems do this the same way).
(8) BSD stat compatibility: Including more fields from the BSD stat such
as creation time (st_btime) and inode generation number (st_gen)
[Jeremy Allison, Bernd Schubert].
(9) Inode generation number: Useful for FUSE and userspace NFS servers
[Bernd Schubert].
(This was asked for but later deemed unnecessary with the
open-by-handle capability available and caused disagreement as to
whether it's a security hole or not).
(10) Extra coherency data may be useful in making backups [Andreas Dilger].
(No particular data were offered, but things like last backup
timestamp, the data version number and the DOS archive bit would come
into this category).
(11) Allow the filesystem to indicate what it can/cannot provide: A
filesystem can now say it doesn't support a standard stat feature if
that isn't available, so if, for instance, inode numbers or UIDs don't
exist or are fabricated locally...
(This requires a separate system call - I have an fsinfo() call idea
for this).
(12) Store a 16-byte volume ID in the superblock that can be returned in
struct xstat [Steve French].
(Deferred to fsinfo).
(13) Include granularity fields in the time data to indicate the
granularity of each of the times (NFSv4 time_delta) [Steve French].
(Deferred to fsinfo).
(14) FS_IOC_GETFLAGS value. These could be translated to BSD's st_flags.
Note that the Linux IOC flags are a mess and filesystems such as Ext4
define flags that aren't in linux/fs.h, so translation in the kernel
may be a necessity (or, possibly, we provide the filesystem type too).
(Some attributes are made available in stx_attributes, but the general
feeling was that the IOC flags were too ext[234]-specific and shouldn't
be exposed through statx this way).
(15) Mask of features available on file (eg: ACLs, seclabel) [Brad Boyer,
Michael Kerrisk].
(Deferred, probably to fsinfo. Finding out if there's an ACL or
seclabel might require extra filesystem operations).
(16) Femtosecond-resolution timestamps [Dave Chinner].
(A __reserved field has been left in the statx_timestamp struct for
this - if there proves to be a need).
(17) A set multiple attributes syscall to go with this.
===============
NEW SYSTEM CALL
===============
The new system call is:
int ret = statx(int dfd,
const char *filename,
unsigned int flags,
unsigned int mask,
struct statx *buffer);
The dfd, filename and flags parameters indicate the file to query, in a
similar way to fstatat(). There is no equivalent of lstat() as that can be
emulated with statx() by passing AT_SYMLINK_NOFOLLOW in flags. There is
also no equivalent of fstat() as that can be emulated by passing a NULL
filename to statx() with the fd of interest in dfd.
Whether or not statx() synchronises the attributes with the backing store
can be controlled by OR'ing a value into the flags argument (this typically
only affects network filesystems):
(1) AT_STATX_SYNC_AS_STAT tells statx() to behave as stat() does in this
respect.
(2) AT_STATX_FORCE_SYNC will require a network filesystem to synchronise
its attributes with the server - which might require data writeback to
occur to get the timestamps correct.
(3) AT_STATX_DONT_SYNC will suppress synchronisation with the server in a
network filesystem. The resulting values should be considered
approximate.
mask is a bitmask indicating the fields in struct statx that are of
interest to the caller. The user should set this to STATX_BASIC_STATS to
get the basic set returned by stat(). It should be noted that asking for
more information may entail extra I/O operations.
buffer points to the destination for the data. This must be 256 bytes in
size.
======================
MAIN ATTRIBUTES RECORD
======================
The following structures are defined in which to return the main attribute
set:
struct statx_timestamp {
__s64 tv_sec;
__s32 tv_nsec;
__s32 __reserved;
};
struct statx {
__u32 stx_mask;
__u32 stx_blksize;
__u64 stx_attributes;
__u32 stx_nlink;
__u32 stx_uid;
__u32 stx_gid;
__u16 stx_mode;
__u16 __spare0[1];
__u64 stx_ino;
__u64 stx_size;
__u64 stx_blocks;
__u64 __spare1[1];
struct statx_timestamp stx_atime;
struct statx_timestamp stx_btime;
struct statx_timestamp stx_ctime;
struct statx_timestamp stx_mtime;
__u32 stx_rdev_major;
__u32 stx_rdev_minor;
__u32 stx_dev_major;
__u32 stx_dev_minor;
__u64 __spare2[14];
};
The defined bits in request_mask and stx_mask are:
STATX_TYPE Want/got stx_mode & S_IFMT
STATX_MODE Want/got stx_mode & ~S_IFMT
STATX_NLINK Want/got stx_nlink
STATX_UID Want/got stx_uid
STATX_GID Want/got stx_gid
STATX_ATIME Want/got stx_atime{,_ns}
STATX_MTIME Want/got stx_mtime{,_ns}
STATX_CTIME Want/got stx_ctime{,_ns}
STATX_INO Want/got stx_ino
STATX_SIZE Want/got stx_size
STATX_BLOCKS Want/got stx_blocks
STATX_BASIC_STATS [The stuff in the normal stat struct]
STATX_BTIME Want/got stx_btime{,_ns}
STATX_ALL [All currently available stuff]
stx_btime is the file creation time, stx_mask is a bitmask indicating the
data provided and __spares*[] are where as-yet undefined fields can be
placed.
Time fields are structures with separate seconds and nanoseconds fields
plus a reserved field in case we want to add even finer resolution. Note
that times will be negative if before 1970; in such a case, the nanosecond
fields will also be negative if not zero.
The bits defined in the stx_attributes field convey information about a
file, how it is accessed, where it is and what it does. The following
attributes map to FS_*_FL flags and are the same numerical value:
STATX_ATTR_COMPRESSED File is compressed by the fs
STATX_ATTR_IMMUTABLE File is marked immutable
STATX_ATTR_APPEND File is append-only
STATX_ATTR_NODUMP File is not to be dumped
STATX_ATTR_ENCRYPTED File requires key to decrypt in fs
Within the kernel, the supported flags are listed by:
KSTAT_ATTR_FS_IOC_FLAGS
[Are any other IOC flags of sufficient general interest to be exposed
through this interface?]
New flags include:
STATX_ATTR_AUTOMOUNT Object is an automount trigger
These are for the use of GUI tools that might want to mark files specially,
depending on what they are.
Fields in struct statx come in a number of classes:
(0) stx_dev_*, stx_blksize.
These are local system information and are always available.
(1) stx_mode, stx_nlink, stx_uid, stx_gid, stx_[amc]time, stx_ino,
stx_size, stx_blocks.
These will be returned whether the caller asks for them or not. The
corresponding bits in stx_mask will be set to indicate whether they
actually have valid values.
If the caller didn't ask for them, then they may be approximated. For
example, NFS won't waste any time updating them from the server,
unless as a byproduct of updating something requested.
If the values don't actually exist for the underlying object (such as
UID or GID on a DOS file), then the bit won't be set in the stx_mask,
even if the caller asked for the value. In such a case, the returned
value will be a fabrication.
Note that there are instances where the type might not be valid, for
instance Windows reparse points.
(2) stx_rdev_*.
This will be set only if stx_mode indicates we're looking at a
blockdev or a chardev, otherwise will be 0.
(3) stx_btime.
Similar to (1), except this will be set to 0 if it doesn't exist.
=======
TESTING
=======
The following test program can be used to test the statx system call:
samples/statx/test-statx.c
Just compile and run, passing it paths to the files you want to examine.
The file is built automatically if CONFIG_SAMPLES is enabled.
Here's some example output. Firstly, an NFS directory that crosses to
another FSID. Note that the AUTOMOUNT attribute is set because transiting
this directory will cause d_automount to be invoked by the VFS.
[root@andromeda ~]# /tmp/test-statx -A /warthog/data
statx(/warthog/data) = 0
results=7ff
Size: 4096 Blocks: 8 IO Block: 1048576 directory
Device: 00:26 Inode: 1703937 Links: 125
Access: (3777/drwxrwxrwx) Uid: 0 Gid: 4041
Access: 2016-11-24 09:02:12.219699527+0000
Modify: 2016-11-17 10:44:36.225653653+0000
Change: 2016-11-17 10:44:36.225653653+0000
Attributes: 0000000000001000 (-------- -------- -------- -------- -------- -------- ---m---- --------)
Secondly, the result of automounting on that directory.
[root@andromeda ~]# /tmp/test-statx /warthog/data
statx(/warthog/data) = 0
results=7ff
Size: 4096 Blocks: 8 IO Block: 1048576 directory
Device: 00:27 Inode: 2 Links: 125
Access: (3777/drwxrwxrwx) Uid: 0 Gid: 4041
Access: 2016-11-24 09:02:12.219699527+0000
Modify: 2016-11-17 10:44:36.225653653+0000
Change: 2016-11-17 10:44:36.225653653+0000
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
2017-01-31 19:46:22 +03:00
struct inode * inode = d_inode ( path - > dentry ) ;
2006-06-14 23:32:57 +04:00
struct gfs2_inode * ip = GFS2_I ( inode ) ;
2006-01-16 19:50:04 +03:00
struct gfs2_holder gh ;
2017-10-09 18:55:58 +03:00
u32 gfsflags ;
2006-01-16 19:50:04 +03:00
int error ;
2016-06-17 15:31:27 +03:00
gfs2_holder_mark_uninitialized ( & gh ) ;
2008-02-22 19:07:18 +03:00
if ( gfs2_glock_is_locked_by_me ( ip - > i_gl ) = = NULL ) {
2014-11-14 05:42:04 +03:00
error = gfs2_glock_nq_init ( ip - > i_gl , LM_ST_SHARED , LM_FLAG_ANY , & gh ) ;
if ( error )
return error ;
2006-01-16 19:50:04 +03:00
}
2017-10-09 18:55:58 +03:00
gfsflags = ip - > i_diskflags ;
if ( gfsflags & GFS2_DIF_APPENDONLY )
stat - > attributes | = STATX_ATTR_APPEND ;
if ( gfsflags & GFS2_DIF_IMMUTABLE )
stat - > attributes | = STATX_ATTR_IMMUTABLE ;
stat - > attributes_mask | = ( STATX_ATTR_APPEND |
STATX_ATTR_COMPRESSED |
STATX_ATTR_ENCRYPTED |
STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP ) ;
2021-01-21 16:19:30 +03:00
generic_fillattr ( & init_user_ns , inode , stat ) ;
2017-10-09 18:55:58 +03:00
2016-06-17 15:31:27 +03:00
if ( gfs2_holder_initialized ( & gh ) )
2006-11-27 18:12:05 +03:00
gfs2_glock_dq_uninit ( & gh ) ;
return 0 ;
2006-01-16 19:50:04 +03:00
}
2008-10-14 17:43:29 +04:00
/**
 * gfs2_fiemap - report the file's extent mapping to userspace
 * @inode: the inode being queried
 * @fieinfo: fiemap state/result buffer from the VFS
 * @start: first byte of the range to map
 * @len: length of the range to map
 *
 * Takes the inode lock shared and a shared glock so the mapping
 * reported is stable while iomap walks it.  Returns 0 or a negative
 * errno.
 */
static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		       u64 start, u64 len)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;

	inode_lock_shared(inode);

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	if (error == 0) {
		error = iomap_fiemap(inode, fieinfo, start, len,
				     &gfs2_iomap_ops);
		gfs2_glock_dq_uninit(&gh);
	}

	inode_unlock_shared(inode);
	return error;
}
2017-03-15 21:12:59 +03:00
/**
 * gfs2_seek_data - find the next data region at or after @offset
 * @file: the file being seeked (SEEK_DATA)
 * @offset: starting file offset
 *
 * Holds the inode lock shared and a shared glock around the iomap
 * walk, then clamps/validates the result via vfs_setpos().  Returns
 * the new file position or a negative errno.
 */
loff_t gfs2_seek_data(struct file *file, loff_t offset)
{
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	loff_t result;

	inode_lock_shared(inode);
	result = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	if (result == 0)
		result = iomap_seek_data(inode, offset, &gfs2_iomap_ops);
	gfs2_glock_dq_uninit(&gh);
	inode_unlock_shared(inode);

	if (result < 0)
		return result;
	return vfs_setpos(file, result, inode->i_sb->s_maxbytes);
}
/**
 * gfs2_seek_hole - find the next hole at or after @offset
 * @file: the file being seeked (SEEK_HOLE)
 * @offset: starting file offset
 *
 * Mirror image of gfs2_seek_data(): shared inode lock plus shared
 * glock around iomap_seek_hole(), result fed through vfs_setpos().
 * Returns the new file position or a negative errno.
 */
loff_t gfs2_seek_hole(struct file *file, loff_t offset)
{
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	loff_t result;

	inode_lock_shared(inode);
	result = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	if (result == 0)
		result = iomap_seek_hole(inode, offset, &gfs2_iomap_ops);
	gfs2_glock_dq_uninit(&gh);
	inode_unlock_shared(inode);

	if (result < 0)
		return result;
	return vfs_setpos(file, result, inode->i_sb->s_maxbytes);
}
2020-11-26 01:37:18 +03:00
static int gfs2_update_time ( struct inode * inode , struct timespec64 * time ,
int flags )
{
struct gfs2_inode * ip = GFS2_I ( inode ) ;
struct gfs2_glock * gl = ip - > i_gl ;
struct gfs2_holder * gh ;
int error ;
gh = gfs2_glock_is_locked_by_me ( gl ) ;
if ( gh & & ! gfs2_glock_is_held_excl ( gl ) ) {
gfs2_glock_dq ( gh ) ;
gfs2_holder_reinit ( LM_ST_EXCLUSIVE , 0 , gh ) ;
error = gfs2_glock_nq ( gh ) ;
if ( error )
return error ;
}
return generic_update_time ( inode , time , flags ) ;
}
2020-11-25 23:14:15 +03:00
/* Inode operations for regular files. */
static const struct inode_operations gfs2_file_iops = {
	.permission = gfs2_permission,
	.setattr = gfs2_setattr,
	.getattr = gfs2_getattr,
	.listxattr = gfs2_listxattr,
	.fiemap = gfs2_fiemap,
	.get_acl = gfs2_get_acl,
	.set_acl = gfs2_set_acl,
	.update_time = gfs2_update_time,
	.fileattr_get = gfs2_fileattr_get,
	.fileattr_set = gfs2_fileattr_set,
};
2020-11-25 23:14:15 +03:00
/* Inode operations for directories. */
static const struct inode_operations gfs2_dir_iops = {
	.create = gfs2_create,
	.lookup = gfs2_lookup,
	.link = gfs2_link,
	.unlink = gfs2_unlink,
	.symlink = gfs2_symlink,
	.mkdir = gfs2_mkdir,
	.rmdir = gfs2_unlink,	/* rmdir shares the unlink implementation */
	.mknod = gfs2_mknod,
	.rename = gfs2_rename2,
	.permission = gfs2_permission,
	.setattr = gfs2_setattr,
	.getattr = gfs2_getattr,
	.listxattr = gfs2_listxattr,
	.fiemap = gfs2_fiemap,
	.get_acl = gfs2_get_acl,
	.set_acl = gfs2_set_acl,
	.update_time = gfs2_update_time,
	.atomic_open = gfs2_atomic_open,
	.fileattr_get = gfs2_fileattr_get,
	.fileattr_set = gfs2_fileattr_set,
};
2020-11-25 23:14:15 +03:00
/* Inode operations for symbolic links.  No ACL or fileattr hooks:
 * those do not apply to symlinks. */
static const struct inode_operations gfs2_symlink_iops = {
	.get_link = gfs2_get_link,
	.permission = gfs2_permission,
	.setattr = gfs2_setattr,
	.getattr = gfs2_getattr,
	.listxattr = gfs2_listxattr,
	.fiemap = gfs2_fiemap,
};