// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/fs_stack.h>

/* does _NOT_ require i_mutex to be held.
 *
 * This function cannot be inlined since i_size_{read,write} is rather
 * heavy-weight on 32-bit systems
 */
void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
{
	loff_t i_size;
	blkcnt_t i_blocks;

	/*
	 * i_size_read() includes its own seqlocking and protection from
	 * preemption (see include/linux/fs.h): we need nothing extra for
	 * that here, and prefer to avoid nesting locks than attempt to keep
	 * i_size and i_blocks in sync together.
	 */
	i_size = i_size_read(src);

	/*
	 * But on 32-bit, we ought to make an effort to keep the two halves of
	 * i_blocks in sync despite SMP or PREEMPTION - though stat's
	 * generic_fillattr() doesn't bother, and we won't be applying quotas
	 * (where i_blocks does become important) at the upper level.
	 *
	 * We don't actually know what locking is used at the lower level;
	 * but if it's a filesystem that supports quotas, it will be using
	 * i_lock as in inode_add_bytes().
	 */
	if (sizeof(i_blocks) > sizeof(long))
		spin_lock(&src->i_lock);
	i_blocks = src->i_blocks;
	if (sizeof(i_blocks) > sizeof(long))
		spin_unlock(&src->i_lock);

	/*
	 * If CONFIG_SMP or CONFIG_PREEMPTION on 32-bit, it's vital for
	 * fsstack_copy_inode_size() to hold some lock around
	 * i_size_write(), otherwise i_size_read() may spin forever (see
	 * include/linux/fs.h).  We don't necessarily hold i_mutex when this
	 * is called, so take i_lock for that case.
	 *
	 * And if on 32-bit, continue our effort to keep the two halves of
	 * i_blocks in sync despite SMP or PREEMPTION: use i_lock for that
	 * case too, and do both at once by combining the tests.
	 *
	 * There is none of this locking overhead in the 64-bit case.
	 */
	if (sizeof(i_size) > sizeof(long) || sizeof(i_blocks) > sizeof(long))
		spin_lock(&dst->i_lock);
	i_size_write(dst, i_size);
	dst->i_blocks = i_blocks;
	if (sizeof(i_size) > sizeof(long) || sizeof(i_blocks) > sizeof(long))
		spin_unlock(&dst->i_lock);
}
EXPORT_SYMBOL_GPL(fsstack_copy_inode_size);
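
/*
 * Example (sketch): a stacking filesystem such as eCryptfs keeps an
 * upper inode layered over a lower inode and calls
 * fsstack_copy_inode_size() after an operation that may have changed
 * the lower inode's size or block count.  The sketch below is purely
 * illustrative; stackfs_update_size() and stackfs_lower_inode() are
 * hypothetical names, not part of any in-tree filesystem.  No i_mutex
 * is needed around the call; the helper takes i_lock itself where
 * required.
 *
 *	static void stackfs_update_size(struct inode *upper)
 *	{
 *		struct inode *lower = stackfs_lower_inode(upper);
 *
 *		fsstack_copy_inode_size(upper, lower);
 *	}
 */
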
/* copy all attributes */
void fsstack_copy_attr_all(struct inode *dest, const struct inode *src)
{
	dest->i_mode = src->i_mode;
	dest->i_uid = src->i_uid;
	dest->i_gid = src->i_gid;
	dest->i_rdev = src->i_rdev;
	dest->i_atime = src->i_atime;
	dest->i_mtime = src->i_mtime;
	dest->i_ctime = src->i_ctime;
	dest->i_blkbits = src->i_blkbits;
	dest->i_flags = src->i_flags;
	set_nlink(dest, src->i_nlink);
}
EXPORT_SYMBOL_GPL(fsstack_copy_attr_all);
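
/*
 * Example (sketch): when a stacked inode is first set up or revalidated
 * against the lower filesystem, a stacking filesystem typically copies
 * the whole attribute set and then the size, e.g. in its lookup or
 * getattr path.  stackfs_refresh_inode() and stackfs_lower_inode() are
 * hypothetical names used only for illustration.
 *
 *	static void stackfs_refresh_inode(struct inode *upper)
 *	{
 *		struct inode *lower = stackfs_lower_inode(upper);
 *
 *		fsstack_copy_attr_all(upper, lower);
 *		fsstack_copy_inode_size(upper, lower);
 *	}
 */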