// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002, 2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_iwalk.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_icache.h"
#include "xfs_health.h"
#include "xfs_trans.h"
/*
 * Bulk Stat
 * =========
 *
 * Use the inode walking functions to fill out struct xfs_bulkstat for every
 * allocated inode, then pass the stat information to some externally provided
 * iteration function.
 */
2019-07-02 19:39:40 +03:00
struct xfs_bstat_chunk {
bulkstat_one_fmt_pf formatter ;
struct xfs_ibulk * breq ;
2019-07-04 06:36:26 +03:00
struct xfs_bulkstat * buf ;
2019-07-02 19:39:40 +03:00
} ;
/*
* Fill out the bulkstat info for a single inode and report it somewhere .
*
* bc - > breq - > lastino is effectively the inode cursor as we walk through the
* filesystem . Therefore , we update it any time we need to move the cursor
* forward , regardless of whether or not we ' re sending any bstat information
* back to userspace . If the inode is internal metadata or , has been freed
* out from under us , we just simply keep going .
*
* However , if any other type of error happens we want to stop right where we
* are so that userspace will call back with exact number of the bad inode and
* we can send back an error code .
*
* Note that if the formatter tells us there ' s no space left in the buffer we
* move the cursor forward and abort the walk .
*/
STATIC int
2010-06-23 12:11:11 +04:00
xfs_bulkstat_one_int (
2019-07-02 19:39:40 +03:00
struct xfs_mount * mp ,
2021-01-21 16:19:58 +03:00
struct user_namespace * mnt_userns ,
2019-07-02 19:39:40 +03:00
struct xfs_trans * tp ,
xfs_ino_t ino ,
struct xfs_bstat_chunk * bc )
2005-04-17 02:20:36 +04:00
{
2021-01-21 16:19:58 +03:00
struct user_namespace * sb_userns = mp - > m_super - > s_user_ns ;
2010-06-23 12:11:11 +04:00
struct xfs_inode * ip ; /* incore inode pointer */
2016-02-09 08:54:58 +03:00
struct inode * inode ;
2019-07-04 06:36:26 +03:00
struct xfs_bulkstat * buf = bc - > buf ;
2022-03-09 15:58:37 +03:00
xfs_extnum_t nextents ;
2019-07-02 19:39:40 +03:00
int error = - EINVAL ;
2010-06-23 12:11:11 +04:00
2019-07-02 19:39:40 +03:00
if ( xfs_internal_inum ( mp , ino ) )
goto out_advance ;
2010-06-23 12:11:11 +04:00
2019-07-02 19:39:40 +03:00
error = xfs_iget ( mp , tp , ino ,
2012-03-22 09:15:10 +04:00
( XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED ) ,
XFS_ILOCK_SHARED , & ip ) ;
2019-07-02 19:39:40 +03:00
if ( error = = - ENOENT | | error = = - EINVAL )
goto out_advance ;
2014-07-24 05:33:28 +04:00
if ( error )
2019-07-02 19:39:40 +03:00
goto out ;
2005-04-17 02:20:36 +04:00
ASSERT ( ip ! = NULL ) ;
2008-11-28 06:23:41 +03:00
ASSERT ( ip - > i_imap . im_blkno ! = 0 ) ;
2016-02-09 08:54:58 +03:00
inode = VFS_I ( ip ) ;
2005-04-17 02:20:36 +04:00
/* xfs_iget returns the following without needing
* further change .
*/
2021-03-29 21:11:39 +03:00
buf - > bs_projectid = ip - > i_projid ;
2005-04-17 02:20:36 +04:00
buf - > bs_ino = ino ;
2021-01-21 16:19:58 +03:00
buf - > bs_uid = from_kuid ( sb_userns , i_uid_into_mnt ( mnt_userns , inode ) ) ;
buf - > bs_gid = from_kgid ( sb_userns , i_gid_into_mnt ( mnt_userns , inode ) ) ;
2021-03-29 21:11:40 +03:00
buf - > bs_size = ip - > i_disk_size ;
2016-02-09 08:54:58 +03:00
2016-02-09 08:54:58 +03:00
buf - > bs_nlink = inode - > i_nlink ;
2019-07-04 06:36:26 +03:00
buf - > bs_atime = inode - > i_atime . tv_sec ;
buf - > bs_atime_nsec = inode - > i_atime . tv_nsec ;
buf - > bs_mtime = inode - > i_mtime . tv_sec ;
buf - > bs_mtime_nsec = inode - > i_mtime . tv_nsec ;
buf - > bs_ctime = inode - > i_ctime . tv_sec ;
buf - > bs_ctime_nsec = inode - > i_ctime . tv_nsec ;
2016-02-09 08:54:58 +03:00
buf - > bs_gen = inode - > i_generation ;
2016-02-09 08:54:58 +03:00
buf - > bs_mode = inode - > i_mode ;
2016-02-09 08:54:58 +03:00
2005-04-17 02:20:36 +04:00
buf - > bs_xflags = xfs_ip2xflags ( ip ) ;
2021-03-29 21:11:41 +03:00
buf - > bs_extsize_blks = ip - > i_extsize ;
2022-03-09 15:58:37 +03:00
nextents = xfs_ifork_nextents ( & ip - > i_df ) ;
if ( ! ( bc - > breq - > flags & XFS_IBULK_NREXT64 ) )
buf - > bs_extents = min ( nextents , XFS_MAX_EXTCNT_DATA_FORK_SMALL ) ;
else
buf - > bs_extents64 = nextents ;
2019-04-12 17:41:18 +03:00
xfs_bulkstat_health ( ip , buf ) ;
2020-05-18 20:27:22 +03:00
buf - > bs_aextents = xfs_ifork_nextents ( ip - > i_afp ) ;
2010-03-05 07:41:14 +03:00
buf - > bs_forkoff = XFS_IFORK_BOFF ( ip ) ;
2019-07-04 06:36:26 +03:00
buf - > bs_version = XFS_BULKSTAT_VERSION_V5 ;
2005-04-17 02:20:36 +04:00
2021-08-19 04:46:37 +03:00
if ( xfs_has_v3inodes ( mp ) ) {
2021-03-29 21:11:45 +03:00
buf - > bs_btime = ip - > i_crtime . tv_sec ;
buf - > bs_btime_nsec = ip - > i_crtime . tv_nsec ;
2021-03-29 21:11:45 +03:00
if ( ip - > i_diflags2 & XFS_DIFLAG2_COWEXTSIZE )
2021-03-29 21:11:42 +03:00
buf - > bs_cowextsize_blks = ip - > i_cowextsize ;
2016-10-03 19:11:43 +03:00
}
2020-05-18 20:28:05 +03:00
switch ( ip - > i_df . if_format ) {
2005-04-17 02:20:36 +04:00
case XFS_DINODE_FMT_DEV :
2017-10-19 21:07:09 +03:00
buf - > bs_rdev = sysv_encode_dev ( inode - > i_rdev ) ;
2005-04-17 02:20:36 +04:00
buf - > bs_blksize = BLKDEV_IOSIZE ;
buf - > bs_blocks = 0 ;
break ;
case XFS_DINODE_FMT_LOCAL :
buf - > bs_rdev = 0 ;
buf - > bs_blksize = mp - > m_sb . sb_blocksize ;
buf - > bs_blocks = 0 ;
break ;
case XFS_DINODE_FMT_EXTENTS :
case XFS_DINODE_FMT_BTREE :
buf - > bs_rdev = 0 ;
buf - > bs_blksize = mp - > m_sb . sb_blocksize ;
2021-03-29 21:11:40 +03:00
buf - > bs_blocks = ip - > i_nblocks + ip - > i_delayed_blks ;
2005-04-17 02:20:36 +04:00
break ;
}
2010-06-24 05:52:50 +04:00
xfs_iunlock ( ip , XFS_ILOCK_SHARED ) ;
2018-07-25 22:52:32 +03:00
xfs_irele ( ip ) ;
2005-04-17 02:20:36 +04:00
2019-07-02 19:39:40 +03:00
error = bc - > formatter ( bc - > breq , buf ) ;
2019-08-29 00:37:57 +03:00
if ( error = = - ECANCELED )
2019-07-02 19:39:40 +03:00
goto out_advance ;
if ( error )
goto out ;
2005-04-17 02:20:36 +04:00
2019-07-02 19:39:40 +03:00
out_advance :
/*
* Advance the cursor to the inode that comes after the one we just
* looked at . We want the caller to move along if the bulkstat
* information was copied successfully ; if we tried to grab the inode
* but it ' s no longer allocated ; or if it ' s internal metadata .
*/
bc - > breq - > startino = ino + 1 ;
out :
2010-06-23 12:11:11 +04:00
return error ;
2005-04-17 02:20:36 +04:00
}
2019-07-02 19:39:40 +03:00
/* Bulkstat a single inode. */
2008-11-26 06:20:11 +03:00
int
xfs_bulkstat_one (
2019-07-02 19:39:40 +03:00
struct xfs_ibulk * breq ,
bulkstat_one_fmt_pf formatter )
2008-11-26 06:20:11 +03:00
{
2019-07-02 19:39:40 +03:00
struct xfs_bstat_chunk bc = {
. formatter = formatter ,
. breq = breq ,
} ;
2021-08-06 21:05:43 +03:00
struct xfs_trans * tp ;
2019-07-02 19:39:40 +03:00
int error ;
2021-03-14 20:59:39 +03:00
if ( breq - > mnt_userns ! = & init_user_ns ) {
xfs_warn_ratelimited ( breq - > mp ,
" bulkstat not supported inside of idmapped mounts. " ) ;
return - EINVAL ;
}
2019-07-02 19:39:40 +03:00
ASSERT ( breq - > icount = = 1 ) ;
2019-07-04 06:36:26 +03:00
bc . buf = kmem_zalloc ( sizeof ( struct xfs_bulkstat ) ,
2019-08-26 22:06:22 +03:00
KM_MAYFAIL ) ;
2019-07-02 19:39:40 +03:00
if ( ! bc . buf )
return - ENOMEM ;
2021-08-06 21:05:43 +03:00
/*
* Grab an empty transaction so that we can use its recursive buffer
* locking abilities to detect cycles in the inobt without deadlocking .
*/
error = xfs_trans_alloc_empty ( breq - > mp , & tp ) ;
if ( error )
goto out ;
2019-07-02 19:39:40 +03:00
2021-08-06 21:05:43 +03:00
error = xfs_bulkstat_one_int ( breq - > mp , breq - > mnt_userns , tp ,
breq - > startino , & bc ) ;
xfs_trans_cancel ( tp ) ;
out :
2019-07-02 19:39:40 +03:00
kmem_free ( bc . buf ) ;
/*
* If we reported one inode to userspace then we abort because we hit
* the end of the buffer . Don ' t leak that back to userspace .
*/
2019-08-29 00:37:57 +03:00
if ( error = = - ECANCELED )
2019-07-02 19:39:40 +03:00
error = 0 ;
return error ;
2006-09-28 05:01:46 +04:00
}
2014-11-07 00:30:30 +03:00
static int
2019-07-02 19:39:40 +03:00
xfs_bulkstat_iwalk (
struct xfs_mount * mp ,
struct xfs_trans * tp ,
xfs_ino_t ino ,
void * data )
2014-08-04 05:22:31 +04:00
{
2021-01-21 16:19:58 +03:00
struct xfs_bstat_chunk * bc = data ;
2019-07-02 19:39:40 +03:00
int error ;
2014-11-07 00:33:52 +03:00
2021-01-21 16:19:58 +03:00
error = xfs_bulkstat_one_int ( mp , bc - > breq - > mnt_userns , tp , ino , data ) ;
2019-07-02 19:39:40 +03:00
/* bulkstat just skips over missing inodes */
if ( error = = - ENOENT | | error = = - EINVAL )
return 0 ;
2014-08-04 05:22:31 +04:00
return error ;
}
2005-04-17 02:20:36 +04:00
/*
2019-07-02 19:39:40 +03:00
* Check the incoming lastino parameter .
*
* We allow any inode value that could map to physical space inside the
* filesystem because if there are no inodes there , bulkstat moves on to the
* next chunk . In other words , the magic agino value of zero takes us to the
* first chunk in the AG , and an agino value past the end of the AG takes us to
* the first chunk in the next AG .
*
* Therefore we can end early if the requested inode is beyond the end of the
* filesystem or doesn ' t map properly .
2005-04-17 02:20:36 +04:00
*/
2019-07-02 19:39:40 +03:00
static inline bool
xfs_bulkstat_already_done (
struct xfs_mount * mp ,
xfs_ino_t startino )
2005-04-17 02:20:36 +04:00
{
2019-07-02 19:39:40 +03:00
xfs_agnumber_t agno = XFS_INO_TO_AGNO ( mp , startino ) ;
xfs_agino_t agino = XFS_INO_TO_AGINO ( mp , startino ) ;
2005-04-17 02:20:36 +04:00
2019-07-02 19:39:40 +03:00
return agno > = mp - > m_sb . sb_agcount | |
startino ! = XFS_AGINO_TO_INO ( mp , agno , agino ) ;
}
2014-07-24 12:40:26 +04:00
2019-07-02 19:39:40 +03:00
/* Return stat information in bulk (by-inode) for the filesystem. */
int
xfs_bulkstat (
struct xfs_ibulk * breq ,
bulkstat_one_fmt_pf formatter )
{
struct xfs_bstat_chunk bc = {
. formatter = formatter ,
. breq = breq ,
} ;
2021-08-06 21:05:43 +03:00
struct xfs_trans * tp ;
2022-03-09 15:34:04 +03:00
unsigned int iwalk_flags = 0 ;
2019-07-02 19:39:40 +03:00
int error ;
2014-11-07 00:30:30 +03:00
2021-01-21 16:19:58 +03:00
if ( breq - > mnt_userns ! = & init_user_ns ) {
xfs_warn_ratelimited ( breq - > mp ,
" bulkstat not supported inside of idmapped mounts. " ) ;
return - EINVAL ;
}
2019-07-02 19:39:40 +03:00
if ( xfs_bulkstat_already_done ( breq - > mp , breq - > startino ) )
return 0 ;
2014-11-07 00:30:30 +03:00
2019-07-04 06:36:26 +03:00
bc . buf = kmem_zalloc ( sizeof ( struct xfs_bulkstat ) ,
2019-08-26 22:06:22 +03:00
KM_MAYFAIL ) ;
2019-07-02 19:39:40 +03:00
if ( ! bc . buf )
2014-06-25 08:58:08 +04:00
return - ENOMEM ;
2014-11-07 00:30:30 +03:00
2021-08-06 21:05:43 +03:00
/*
* Grab an empty transaction so that we can use its recursive buffer
* locking abilities to detect cycles in the inobt without deadlocking .
*/
error = xfs_trans_alloc_empty ( breq - > mp , & tp ) ;
if ( error )
goto out ;
2014-11-07 00:31:13 +03:00
2022-03-09 15:34:04 +03:00
if ( breq - > flags & XFS_IBULK_SAME_AG )
iwalk_flags | = XFS_IWALK_SAME_AG ;
error = xfs_iwalk ( breq - > mp , tp , breq - > startino , iwalk_flags ,
2021-08-06 21:05:43 +03:00
xfs_bulkstat_iwalk , breq - > icount , & bc ) ;
xfs_trans_cancel ( tp ) ;
out :
2019-07-02 19:39:40 +03:00
kmem_free ( bc . buf ) ;
2014-11-07 00:31:15 +03:00
2007-11-23 08:30:32 +03:00
/*
2014-11-07 00:31:15 +03:00
* We found some inodes , so clear the error status and return them .
* The lastino pointer will point directly at the inode that triggered
* any error that occurred , so on the next call the error will be
* triggered again and propagated to userspace as there will be no
* formatted inodes in the buffer .
2007-11-23 08:30:32 +03:00
*/
2019-07-02 19:39:40 +03:00
if ( breq - > ocount > 0 )
2014-11-07 00:31:15 +03:00
error = 0 ;
return error ;
2005-04-17 02:20:36 +04:00
}
2019-07-04 06:36:26 +03:00
/* Convert bulkstat (v5) to bstat (v1). */
void
xfs_bulkstat_to_bstat (
struct xfs_mount * mp ,
struct xfs_bstat * bs1 ,
const struct xfs_bulkstat * bstat )
{
2019-07-29 07:12:32 +03:00
/* memset is needed here because of padding holes in the structure. */
2019-07-04 06:36:26 +03:00
memset ( bs1 , 0 , sizeof ( struct xfs_bstat ) ) ;
bs1 - > bs_ino = bstat - > bs_ino ;
bs1 - > bs_mode = bstat - > bs_mode ;
bs1 - > bs_nlink = bstat - > bs_nlink ;
bs1 - > bs_uid = bstat - > bs_uid ;
bs1 - > bs_gid = bstat - > bs_gid ;
bs1 - > bs_rdev = bstat - > bs_rdev ;
bs1 - > bs_blksize = bstat - > bs_blksize ;
bs1 - > bs_size = bstat - > bs_size ;
bs1 - > bs_atime . tv_sec = bstat - > bs_atime ;
bs1 - > bs_mtime . tv_sec = bstat - > bs_mtime ;
bs1 - > bs_ctime . tv_sec = bstat - > bs_ctime ;
bs1 - > bs_atime . tv_nsec = bstat - > bs_atime_nsec ;
bs1 - > bs_mtime . tv_nsec = bstat - > bs_mtime_nsec ;
bs1 - > bs_ctime . tv_nsec = bstat - > bs_ctime_nsec ;
bs1 - > bs_blocks = bstat - > bs_blocks ;
bs1 - > bs_xflags = bstat - > bs_xflags ;
bs1 - > bs_extsize = XFS_FSB_TO_B ( mp , bstat - > bs_extsize_blks ) ;
bs1 - > bs_extents = bstat - > bs_extents ;
bs1 - > bs_gen = bstat - > bs_gen ;
bs1 - > bs_projid_lo = bstat - > bs_projectid & 0xFFFF ;
bs1 - > bs_forkoff = bstat - > bs_forkoff ;
bs1 - > bs_projid_hi = bstat - > bs_projectid > > 16 ;
bs1 - > bs_sick = bstat - > bs_sick ;
bs1 - > bs_checked = bstat - > bs_checked ;
bs1 - > bs_cowextsize = XFS_FSB_TO_B ( mp , bstat - > bs_cowextsize_blks ) ;
bs1 - > bs_dmevmask = 0 ;
bs1 - > bs_dmstate = 0 ;
bs1 - > bs_aextents = bstat - > bs_aextents ;
}
2019-07-02 19:39:43 +03:00
struct xfs_inumbers_chunk {
inumbers_fmt_pf formatter ;
struct xfs_ibulk * breq ;
} ;
/*
 * INUMBERS
 * ========
 * This is how we export inode btree records to userspace, so that XFS tools
 * can figure out where inodes are allocated.
 */
/*
* Format the inode group structure and report it somewhere .
*
* Similar to xfs_bulkstat_one_int , lastino is the inode cursor as we walk
* through the filesystem so we move it forward unless there was a runtime
* error . If the formatter tells us the buffer is now full we also move the
* cursor forward and abort the walk .
*/
STATIC int
xfs_inumbers_walk (
struct xfs_mount * mp ,
struct xfs_trans * tp ,
xfs_agnumber_t agno ,
const struct xfs_inobt_rec_incore * irec ,
void * data )
2007-07-11 05:10:19 +04:00
{
2019-07-04 06:36:27 +03:00
struct xfs_inumbers inogrp = {
2019-07-02 19:39:43 +03:00
. xi_startino = XFS_AGINO_TO_INO ( mp , agno , irec - > ir_startino ) ,
. xi_alloccount = irec - > ir_count - irec - > ir_freecount ,
. xi_allocmask = ~ irec - > ir_free ,
2019-07-04 06:36:27 +03:00
. xi_version = XFS_INUMBERS_VERSION_V5 ,
2019-07-02 19:39:43 +03:00
} ;
struct xfs_inumbers_chunk * ic = data ;
int error ;
error = ic - > formatter ( ic - > breq , & inogrp ) ;
2019-08-29 00:37:57 +03:00
if ( error & & error ! = - ECANCELED )
2019-07-02 19:39:43 +03:00
return error ;
2019-07-09 05:36:17 +03:00
ic - > breq - > startino = XFS_AGINO_TO_INO ( mp , agno , irec - > ir_startino ) +
XFS_INODES_PER_CHUNK ;
2019-07-02 19:39:43 +03:00
return error ;
2007-07-11 05:10:19 +04:00
}
2005-04-17 02:20:36 +04:00
/*
* Return inode number table for the filesystem .
*/
2019-07-02 19:39:43 +03:00
int
2005-04-17 02:20:36 +04:00
xfs_inumbers (
2019-07-02 19:39:43 +03:00
struct xfs_ibulk * breq ,
2014-07-24 06:11:47 +04:00
inumbers_fmt_pf formatter )
2005-04-17 02:20:36 +04:00
{
2019-07-02 19:39:43 +03:00
struct xfs_inumbers_chunk ic = {
. formatter = formatter ,
. breq = breq ,
} ;
2021-08-06 21:05:43 +03:00
struct xfs_trans * tp ;
2014-07-24 06:11:47 +04:00
int error = 0 ;
2019-07-02 19:39:43 +03:00
if ( xfs_bulkstat_already_done ( breq - > mp , breq - > startino ) )
return 0 ;
2014-07-24 06:11:47 +04:00
2021-08-06 21:05:43 +03:00
/*
* Grab an empty transaction so that we can use its recursive buffer
* locking abilities to detect cycles in the inobt without deadlocking .
*/
error = xfs_trans_alloc_empty ( breq - > mp , & tp ) ;
if ( error )
goto out ;
error = xfs_inobt_walk ( breq - > mp , tp , breq - > startino , breq - > flags ,
2019-07-02 19:39:43 +03:00
xfs_inumbers_walk , breq - > icount , & ic ) ;
2021-08-06 21:05:43 +03:00
xfs_trans_cancel ( tp ) ;
out :
2014-07-24 06:18:47 +04:00
2019-07-02 19:39:43 +03:00
/*
* We found some inode groups , so clear the error status and return
* them . The lastino pointer will point directly at the inode that
* triggered any error that occurred , so on the next call the error
* will be triggered again and propagated to userspace as there will be
* no formatted inode groups in the buffer .
*/
if ( breq - > ocount > 0 )
error = 0 ;
2014-07-24 06:18:47 +04:00
2005-04-17 02:20:36 +04:00
return error ;
}
2019-07-04 06:36:27 +03:00
/* Convert an inumbers (v5) struct to a inogrp (v1) struct. */
void
xfs_inumbers_to_inogrp (
struct xfs_inogrp * ig1 ,
const struct xfs_inumbers * ig )
{
2019-07-29 07:12:32 +03:00
/* memset is needed here because of padding holes in the structure. */
memset ( ig1 , 0 , sizeof ( struct xfs_inogrp ) ) ;
2019-07-04 06:36:27 +03:00
ig1 - > xi_startino = ig - > xi_startino ;
ig1 - > xi_alloccount = ig - > xi_alloccount ;
ig1 - > xi_allocmask = ig - > xi_allocmask ;
}