// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_icache.h"
#include "xfs_rmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
#include "xfs_ag.h"

/*
 * Set us up to scrub inode btrees.
 * If we detect a discrepancy between the inobt and the inode,
 * try again after forcing logged inode cores out to disk.
 */
int
xchk_setup_ag_iallocbt(
	struct xfs_scrub	*sc)
{
	return xchk_setup_ag_btree(sc, sc->flags & XCHK_TRY_HARDER);
}

/* Inode btree scrubber. */

struct xchk_iallocbt {
	/* Number of inodes we see while scanning inobt. */
	unsigned long long	inodes;

	/* Expected next startino, for big block filesystems. */
	xfs_agino_t		next_startino;

	/* Expected end of the current inode cluster. */
	xfs_agino_t		next_cluster_ino;
};

/*
 * If we're checking the finobt, cross-reference with the inobt.
 * Otherwise we're checking the inobt; if there is an finobt, make sure
 * we have a record or not depending on freecount.
 */
static inline void
xchk_iallocbt_chunk_xref_other(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino)
{
	struct xfs_btree_cur		**pcur;
	bool				has_irec;
	int				error;

	if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
		pcur = &sc->sa.ino_cur;
	else
		pcur = &sc->sa.fino_cur;
	if (!(*pcur))
		return;
	error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
	if (!xchk_should_check_xref(sc, &error, pcur))
		return;
	if ((irec->ir_freecount > 0 && !has_irec) ||
	    (irec->ir_freecount == 0 && has_irec))
		xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}

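/*
 * A sketch of the rule encoded above, with made-up freecounts: the finobt
 * indexes only inode chunks that still contain free inodes, so a record with
 *
 *	irec->ir_freecount == 3		must appear in the finobt, and
 *	irec->ir_freecount == 0		must not.
 *
 * Either mismatch marks the cross-referenced btree corrupt.
 */
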
/* Cross-reference with the other btrees. */
STATIC void
xchk_iallocbt_chunk_xref(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_iallocbt_chunk_xref_other(sc, irec, agino);
	xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
	xchk_xref_is_not_shared(sc, agbno, len);
}

/* Is this chunk worth checking? */
STATIC bool
xchk_iallocbt_chunk(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_extlen_t			len)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_agnumber_t			agno = bs->cur->bc_ag.pag->pag_agno;
	xfs_agblock_t			bno;

	bno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);

	return true;
}

/* Count the number of free inodes. */
static unsigned int
xchk_iallocbt_freecount(
	xfs_inofree_t			freemask)
{
	BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
	return hweight64(freemask);
}

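/*
 * Worked example (values are illustrative): a freemask of 0xf0 has four bits
 * set, so xchk_iallocbt_freecount() returns 4 -- inodes 4-7 of the 64-inode
 * chunk are free.
 */
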
/*
 * Check that an inode's allocation status matches ir_free in the inobt
 * record.  First we try querying the in-core inode state, and if the inode
 * isn't loaded we examine the on-disk inode directly.
 *
 * Since there can be 1:M and M:1 mappings between inobt records and inode
 * clusters, we pass in the inode location information as an inobt record;
 * the index of an inode cluster within the inobt record (as well as the
 * cluster buffer itself); and the index of the inode within the cluster.
 *
 * @irec is the inobt record.
 * @irec_ino is the inode offset from the start of the record.
 * @dip is the on-disk inode.
 */
STATIC int
xchk_iallocbt_check_cluster_ifree(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			irec_ino,
	struct xfs_dinode		*dip)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_ino_t			fsino;
	xfs_agino_t			agino;
	bool				irec_free;
	bool				ino_inuse;
	bool				freemask_ok;
	int				error = 0;

	if (xchk_should_terminate(bs->sc, &error))
		return error;

	/*
	 * Given an inobt record and the offset of an inode from the start of
	 * the record, compute which fs inode we're talking about.
	 */
	agino = irec->ir_startino + irec_ino;
	fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_ag.pag->pag_agno, agino);
	irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));

	if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
	    (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp, fsino,
			&ino_inuse);
	if (error == -ENODATA) {
		/* Not cached, just read the disk buffer */
		freemask_ok = irec_free ^ !!(dip->di_mode);
		if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok)
			return -EDEADLOCK;
	} else if (error < 0) {
		/*
		 * Inode is only half assembled, or there was an IO error,
		 * or the verifier failed, so don't bother trying to check.
		 * The inode scrubber can deal with this.
		 */
		goto out;
	} else {
		/* Inode is all there. */
		freemask_ok = irec_free ^ ino_inuse;
	}
	if (!freemask_ok)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
	return 0;
}

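/*
 * Truth table for the freemask check above (derived from the XOR, not from
 * any external spec): irec_free is the btree's opinion, ino_inuse the
 * inode's actual state.
 *
 *	irec_free  ino_inuse	freemask_ok
 *	    0          0	false (marked allocated, actually free)
 *	    0          1	true
 *	    1          0	true
 *	    1          1	false (marked free, actually in use)
 */
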
/*
 * Check that the holemask and freemask of a hypothetical inode cluster match
 * what's actually on disk.  If sparse inodes are enabled, the cluster does
 * not actually have to map to inodes if the corresponding holemask bit is set.
 *
 * @cluster_base is the first inode in the cluster within the @irec.
 */
STATIC int
xchk_iallocbt_check_cluster(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			cluster_base)
{
	struct xfs_imap			imap;
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_buf			*cluster_bp;
	unsigned int			nr_inodes;
	xfs_agnumber_t			agno = bs->cur->bc_ag.pag->pag_agno;
	xfs_agblock_t			agbno;
	unsigned int			cluster_index;
	uint16_t			cluster_mask = 0;
	uint16_t			ir_holemask;
	int				error = 0;

	nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
			M_IGEO(mp)->inodes_per_cluster);

	/* Map this inode cluster */
	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base);

	/* Compute a bitmask for this cluster that can be used for holemask. */
	for (cluster_index = 0;
	     cluster_index < nr_inodes;
	     cluster_index += XFS_INODES_PER_HOLEMASK_BIT)
		cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) /
				XFS_INODES_PER_HOLEMASK_BIT);

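	/*
	 * Worked example (hypothetical geometry): with 32 inodes per cluster
	 * and XFS_INODES_PER_HOLEMASK_BIT == 4, a cluster_base of 32 sets
	 * holemask bits 8-15, so cluster_mask == 0xff00 -- the half of the
	 * 64-inode chunk covered by this cluster.
	 */
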
	/*
	 * Map the first inode of this cluster to a buffer and offset.
	 * Be careful about inobt records that don't align with the start of
	 * the inode buffer when block sizes are large enough to hold multiple
	 * inode chunks.  When this happens, cluster_base will be zero but
	 * ir_startino can be large enough to make im_boffset nonzero.
	 */
	ir_holemask = (irec->ir_holemask & cluster_mask);
	imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
	imap.im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
	imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
			mp->m_sb.sb_inodelog;

	if (imap.im_boffset != 0 && cluster_base != 0) {
		ASSERT(imap.im_boffset == 0 || cluster_base == 0);
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

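	/*
	 * Sketch of the M:1 case guarded against above (hypothetical
	 * geometry): with 64k blocks and 512-byte inodes, one block holds two
	 * 64-inode chunks.  The second chunk's record has cluster_base == 0
	 * but sits 64 inodes into the block, so im_boffset == 64 * 512 bytes.
	 * A nonzero im_boffset together with a nonzero cluster_base cannot
	 * happen.
	 */
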
	trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino,
			imap.im_blkno, imap.im_len, cluster_base, nr_inodes,
			cluster_mask, ir_holemask,
			XFS_INO_TO_OFFSET(mp, irec->ir_startino +
					  cluster_base));

	/* The whole cluster must be a hole or not a hole. */
	if (ir_holemask != cluster_mask && ir_holemask != 0) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	/* If any part of this is a hole, skip it. */
	if (ir_holemask) {
		xchk_xref_is_not_owned_by(bs->sc, agbno,
				M_IGEO(mp)->blocks_per_cluster,
				&XFS_RMAP_OINFO_INODES);
		return 0;
	}

	xchk_xref_is_owned_by(bs->sc, agbno, M_IGEO(mp)->blocks_per_cluster,
			&XFS_RMAP_OINFO_INODES);

	/* Grab the inode cluster buffer. */
	error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &cluster_bp);
	if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
		return error;

	/* Check free status of each inode within this cluster. */
	for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) {
		struct xfs_dinode	*dip;

		if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			break;
		}

		dip = xfs_buf_offset(cluster_bp, imap.im_boffset);
		error = xchk_iallocbt_check_cluster_ifree(bs, irec,
				cluster_base + cluster_index, dip);
		if (error)
			break;
		imap.im_boffset += mp->m_sb.sb_inodesize;
	}

	xfs_trans_brelse(bs->cur->bc_tp, cluster_bp);
	return error;
}

/*
 * For all the inode clusters that could map to this inobt record, make sure
 * that the holemask makes sense and that the allocation status of each inode
 * matches the freemask.
 */
STATIC int
xchk_iallocbt_check_clusters(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	unsigned int			cluster_base;
	int				error = 0;

	/*
	 * For the common case where this inobt record maps to multiple inode
	 * clusters this will call _check_cluster for each cluster.
	 *
	 * For the case that multiple inobt records map to a single cluster,
	 * this will call _check_cluster once.
	 */
	for (cluster_base = 0;
	     cluster_base < XFS_INODES_PER_CHUNK;
	     cluster_base += M_IGEO(bs->sc->mp)->inodes_per_cluster) {
		error = xchk_iallocbt_check_cluster(bs, irec, cluster_base);
		if (error)
			break;
	}

	return error;
}

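/*
 * Concrete instances of both cases (hypothetical geometry): with 16 inodes
 * per cluster, one 64-inode record spans four clusters and the loop above
 * runs with cluster_base 0, 16, 32, and 48.  With 128 inodes per cluster,
 * the loop runs once with cluster_base == 0, and the cluster is shared with
 * the adjacent inobt record.
 */
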
/*
 * Make sure this inode btree record is aligned properly.  Because a fs block
 * contains multiple inodes, we check that the inobt record is aligned to the
 * correct inode, not just the correct block on disk.  This results in a finer
 * grained corruption check.
 */
STATIC void
xchk_iallocbt_rec_alignment(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec)
{
	struct xfs_mount		*mp = bs->sc->mp;
	struct xchk_iallocbt		*iabt = bs->private;
	struct xfs_ino_geometry		*igeo = M_IGEO(mp);

	/*
	 * finobt records have different positioning requirements than inobt
	 * records: each finobt record must have a corresponding inobt record.
	 * That is checked in the xref function, so for now we only catch the
	 * obvious case where the record isn't at all aligned properly.
	 *
	 * Note that if a fs block contains more than a single chunk of inodes,
	 * we will have finobt records only for those chunks containing free
	 * inodes, and therefore expect chunk alignment of finobt records.
	 * Otherwise, we expect that the finobt record is aligned to the
	 * cluster alignment as told by the superblock.
	 */
	if (bs->cur->bc_btnum == XFS_BTNUM_FINO) {
		unsigned int	imask;

		imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
				igeo->cluster_align_inodes) - 1;
		if (irec->ir_startino & imask)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (iabt->next_startino != NULLAGINO) {
		/*
		 * We're midway through a cluster of inodes that is mapped by
		 * multiple inobt records.  Did we get the record for the next
		 * irec in the sequence?
		 */
		if (irec->ir_startino != iabt->next_startino) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			return;
		}

		iabt->next_startino += XFS_INODES_PER_CHUNK;

		/* Are we done with the cluster? */
		if (iabt->next_startino >= iabt->next_cluster_ino) {
			iabt->next_startino = NULLAGINO;
			iabt->next_cluster_ino = NULLAGINO;
		}
		return;
	}

	/* inobt records must be aligned to cluster and inoalignment size. */
	if (irec->ir_startino & (igeo->cluster_align_inodes - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (irec->ir_startino & (igeo->inodes_per_cluster - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return;
	}

	if (igeo->inodes_per_cluster <= XFS_INODES_PER_CHUNK)
		return;

	/*
	 * If this is the start of an inode cluster that can be mapped by
	 * multiple inobt records, the next inobt record must follow exactly
	 * after this one.
	 */
	iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK;
	iabt->next_cluster_ino = irec->ir_startino + igeo->inodes_per_cluster;
}

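/*
 * Example of the next_startino chain (hypothetical geometry): with 128
 * inodes per cluster, a cluster starting at agino 256 must be mapped by
 * exactly two 64-inode records.  Seeing the record at startino 256 arms
 * next_startino = 320 and next_cluster_ino = 384; the next record must
 * start at exactly 320, after which both fields reset to NULLAGINO.
 */
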
/* Scrub an inobt/finobt record. */
STATIC int
xchk_iallocbt_rec(
	struct xchk_btree		*bs,
	const union xfs_btree_rec	*rec)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xchk_iallocbt		*iabt = bs->private;
	struct xfs_inobt_rec_incore	irec;
	uint64_t			holes;
	xfs_agnumber_t			agno = bs->cur->bc_ag.pag->pag_agno;
	xfs_agino_t			agino;
	xfs_extlen_t			len;
	int				holecount;
	int				i;
	int				error = 0;
	unsigned int			real_freecount;
	uint16_t			holemask;

	xfs_inobt_btrec_to_irec(mp, rec, &irec);

	if (irec.ir_count > XFS_INODES_PER_CHUNK ||
	    irec.ir_freecount > XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	real_freecount = irec.ir_freecount +
			(XFS_INODES_PER_CHUNK - irec.ir_count);
	if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

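	/*
	 * Why the adjustment above: a sparse chunk's ir_free also has bits
	 * set for inodes that were never allocated to the chunk, so a record
	 * with, say, ir_count == 48 and ir_freecount == 10 should have
	 * 10 + (64 - 48) == 26 bits set in ir_free (illustrative numbers).
	 */
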
	agino = irec.ir_startino;

	/* Record has to be properly aligned within the AG. */
	if (!xfs_verify_agino(mp, agno, agino) ||
	    !xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		goto out;
	}

	xchk_iallocbt_rec_alignment(bs, &irec);
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	iabt->inodes += irec.ir_count;

	/* Handle non-sparse inodes */
	if (!xfs_inobt_issparse(irec.ir_holemask)) {
		len = XFS_B_TO_FSB(mp,
				XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
		if (irec.ir_count != XFS_INODES_PER_CHUNK)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

		if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			goto out;
		goto check_clusters;
	}

	/* Check each chunk of a sparse inode cluster. */
	holemask = irec.ir_holemask;
	holecount = 0;
	len = XFS_B_TO_FSB(mp,
			XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize);
	holes = ~xfs_inobt_irec_to_allocmask(&irec);
	if ((holes & irec.ir_free) != holes ||
	    irec.ir_freecount > irec.ir_count)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
		if (holemask & 1)
			holecount += XFS_INODES_PER_HOLEMASK_BIT;
		else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
			break;
		holemask >>= 1;
		agino += XFS_INODES_PER_HOLEMASK_BIT;
	}

	if (holecount > XFS_INODES_PER_CHUNK ||
	    holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

check_clusters:
	error = xchk_iallocbt_check_clusters(bs, &irec);
	if (error)
		goto out;

out:
	return error;
}

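/*
 * Worked example of the sparse checks above (illustrative values): a record
 * with ir_holemask == 0x000f has its first 16 inodes (4 holemask bits * 4
 * inodes each) as holes, so ir_count must be 48 and holecount comes out to
 * 16.  Every hole inode must also be marked free, which is what the
 * (holes & ir_free) != holes test enforces.
 */
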
/*
 * Make sure the inode btrees are as large as the rmap thinks they are.
 * Don't bother if we're missing btree cursors, as we're already corrupt.
 */
STATIC void
xchk_iallocbt_xref_rmap_btreeblks(
	struct xfs_scrub	*sc,
	int			which)
{
	xfs_filblks_t		blocks;
	xfs_extlen_t		inobt_blocks = 0;
	xfs_extlen_t		finobt_blocks = 0;
	int			error;

	if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
	    (xfs_has_finobt(sc->mp) && !sc->sa.fino_cur) ||
	    xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inobt blocks as the rmap says. */
	error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
	if (!xchk_process_error(sc, 0, 0, &error))
		return;

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
		if (!xchk_process_error(sc, 0, 0, &error))
			return;
	}

	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INOBT, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != inobt_blocks + finobt_blocks)
		xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}

/*
 * Make sure that the inobt records point to the same number of blocks as
 * the rmap says are owned by inodes.
 */
STATIC void
xchk_iallocbt_xref_rmap_inodes(
	struct xfs_scrub	*sc,
	int			which,
	unsigned long long	inodes)
{
	xfs_filblks_t		blocks;
	xfs_filblks_t		inode_blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many inode blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_INODES, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
	if (blocks != inode_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

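/*
 * Illustrative arithmetic for the comparison above: 1024 inodes of 512
 * bytes each on a 4096-byte-block filesystem imply 1024 * 512 / 4096 == 128
 * blocks, which must match the rmap's count of blocks owned by
 * XFS_RMAP_OINFO_INODES.
 */
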
/* Scrub the inode btrees for some AG. */
STATIC int
xchk_iallocbt(
	struct xfs_scrub	*sc,
	xfs_btnum_t		which)
{
	struct xfs_btree_cur	*cur;
	struct xchk_iallocbt	iabt = {
		.inodes		= 0,
		.next_startino	= NULLAGINO,
		.next_cluster_ino = NULLAGINO,
	};
	int			error;

	cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
	error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
			&iabt);
	if (error)
		return error;

	xchk_iallocbt_xref_rmap_btreeblks(sc, which);

	/*
	 * If we're scrubbing the inode btree, inode_blocks is the number of
	 * blocks pointed to by all the inode chunk records.  Therefore, we
	 * should compare to the number of inode chunk blocks that the rmap
	 * knows about.  We can't do this for the finobt since it only points
	 * to inode chunks with free inodes.
	 */
	if (which == XFS_BTNUM_INO)
		xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes);

	return error;
}

int
xchk_inobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_INO);
}

int
xchk_finobt(
	struct xfs_scrub	*sc)
{
	return xchk_iallocbt(sc, XFS_BTNUM_FINO);
}

/* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void
xchk_xref_inode_check(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	struct xfs_btree_cur	**icur,
	bool			should_have_inodes)
{
	bool			has_inodes;
	int			error;

	if (!(*icur) || xchk_skip_xref(sc->sm))
		return;

	error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
	if (!xchk_should_check_xref(sc, &error, icur))
		return;
	if (has_inodes != should_have_inodes)
		xchk_btree_xref_set_corrupt(sc, *icur, 0);
}

/* xref check that the extent is not covered by inodes */
void
xchk_xref_is_not_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
}

/* xref check that the extent is covered by inodes */
void
xchk_xref_is_inode_chunk(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
}