// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
# include "xfs.h"
# include "xfs_fs.h"
# include "xfs_shared.h"
# include "xfs_format.h"
2023-04-11 19:00:38 -07:00
# include "xfs_log_format.h"
2017-10-17 21:37:41 -07:00
# include "xfs_trans_resv.h"
# include "xfs_mount.h"
2023-04-11 19:00:38 -07:00
# include "xfs_trans.h"
2017-10-17 21:37:41 -07:00
# include "xfs_btree.h"
# include "xfs_rmap.h"
2018-01-16 18:53:09 -08:00
# include "xfs_refcount.h"
2023-04-11 19:00:38 -07:00
# include "xfs_ag.h"
# include "xfs_bit.h"
2023-04-11 19:00:38 -07:00
# include "xfs_alloc.h"
# include "xfs_alloc_btree.h"
2023-04-11 19:00:39 -07:00
# include "xfs_ialloc_btree.h"
2023-04-11 19:00:39 -07:00
# include "xfs_refcount_btree.h"
2017-10-17 21:37:41 -07:00
# include "scrub/scrub.h"
# include "scrub/common.h"
# include "scrub/btree.h"
2023-04-11 19:00:38 -07:00
# include "scrub/bitmap.h"
2023-12-15 10:03:30 -08:00
# include "scrub/agb_bitmap.h"
2024-02-22 12:43:38 -08:00
# include "scrub/repair.h"

/*
 * Set us up to scrub reverse mapping btrees.
 */
int
xchk_setup_ag_rmapbt(
	struct xfs_scrub	*sc)
{
	if (xchk_need_intent_drain(sc))
		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);

	if (xchk_could_repair(sc)) {
		int		error;

		error = xrep_setup_ag_rmapbt(sc);
		if (error)
			return error;
	}

	return xchk_setup_ag_btree(sc, false);
}

/* Reverse-mapping scrubber. */

struct xchk_rmap {
	/*
	 * The furthest-reaching of the rmapbt records that we've already
	 * processed.  This enables us to detect overlapping records for space
	 * allocations that cannot be shared.
	 */
	struct xfs_rmap_irec	overlap_rec;

	/*
	 * The previous rmapbt record, so that we can check for two records
	 * that could be one.
	 */
	struct xfs_rmap_irec	prev_rec;

	/* Bitmaps containing all blocks for each type of AG metadata. */
	struct xagb_bitmap	fs_owned;
	struct xagb_bitmap	log_owned;
	struct xagb_bitmap	ag_owned;
	struct xagb_bitmap	inobt_owned;
	struct xagb_bitmap	refcbt_owned;

	/* Did we complete the AG space metadata bitmaps? */
	bool			bitmaps_complete;
};

/* Cross-reference a rmap against the refcount btree. */
STATIC void
xchk_rmapbt_xref_refc(
	struct xfs_scrub	*sc,
	struct xfs_rmap_irec	*irec)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	bool			non_inode;
	bool			is_bmbt;
	bool			is_attr;
	bool			is_unwritten;
	int			error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

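	/* Decode the owner type and mapping flags for the sharing check below. */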
	non_inode = XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
	is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK;
	is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK;
	is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN;

	/* If this is shared, must be a data fork extent. */
	error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock,
			irec->rm_blockcount, &fbno, &flen, false);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (flen != 0 && (non_inode || is_attr || is_bmbt || is_unwritten))
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_rmapbt_xref(
	struct xfs_scrub	*sc,
	struct xfs_rmap_irec	*irec)
{
	xfs_agblock_t		agbno = irec->rm_startblock;
	xfs_extlen_t		len = irec->rm_blockcount;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	if (irec->rm_owner == XFS_RMAP_OWN_INODES)
		xchk_xref_is_inode_chunk(sc, agbno, len);
	else
		xchk_xref_is_not_inode_chunk(sc, agbno, len);
	if (irec->rm_owner == XFS_RMAP_OWN_COW)
		xchk_xref_is_cow_staging(sc, irec->rm_startblock,
				irec->rm_blockcount);
	else
		xchk_rmapbt_xref_refc(sc, irec);
}

/*
 * Check for bogus UNWRITTEN flags in the rmapbt node block keys.
 *
 * In reverse mapping records, the file mapping extent state
 * (XFS_RMAP_OFF_UNWRITTEN) is a record attribute, not a key field.  It is not
 * involved in lookups in any way.  In older kernels, the functions that
 * convert rmapbt records to keys forgot to filter out the extent state bit,
 * even though the key comparison functions have filtered the flag correctly.
 * If we spot an rmap key with the unwritten bit set in rm_offset, we should
 * mark the btree as needing optimization to rebuild the btree without those
 * flags.
 */
STATIC void
xchk_rmapbt_check_unwritten_in_keyflags(
	struct xchk_btree	*bs)
{
	struct xfs_scrub	*sc = bs->sc;
	struct xfs_btree_cur	*cur = bs->cur;
	struct xfs_btree_block	*keyblock;
	union xfs_btree_key	*lkey, *hkey;
	__be64			badflag = cpu_to_be64(XFS_RMAP_OFF_UNWRITTEN);
	unsigned int		level;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_PREEN)
		return;

	for (level = 1; level < cur->bc_nlevels; level++) {
		struct xfs_buf	*bp;
		unsigned int	ptr;

		/* Only check the first time we've seen this node block. */
		if (cur->bc_levels[level].ptr > 1)
			continue;

		keyblock = xfs_btree_get_block(cur, level, &bp);
		for (ptr = 1; ptr <= be16_to_cpu(keyblock->bb_numrecs); ptr++) {
			lkey = xfs_btree_key_addr(cur, ptr, keyblock);

			if (lkey->rmap.rm_offset & badflag) {
				xchk_btree_set_preen(sc, cur, level);
				break;
			}

			hkey = xfs_btree_high_key_addr(cur, ptr, keyblock);
			if (hkey->rmap.rm_offset & badflag) {
				xchk_btree_set_preen(sc, cur, level);
				break;
			}
		}
	}
}

static inline bool
xchk_rmapbt_is_shareable(
	struct xfs_scrub		*sc,
	const struct xfs_rmap_irec	*irec)
{
	if (!xfs_has_reflink(sc->mp))
		return false;
	if (XFS_RMAP_NON_INODE_OWNER(irec->rm_owner))
		return false;
	if (irec->rm_flags & (XFS_RMAP_BMBT_BLOCK | XFS_RMAP_ATTR_FORK |
			      XFS_RMAP_UNWRITTEN))
		return false;
	return true;
}

/* Flag failures for records that overlap but cannot. */
STATIC void
xchk_rmapbt_check_overlapping(
	struct xchk_btree		*bs,
	struct xchk_rmap		*cr,
	const struct xfs_rmap_irec	*irec)
{
	xfs_agblock_t			pnext, inext;

	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	/* No previous record? */
	if (cr->overlap_rec.rm_blockcount == 0)
		goto set_prev;

	/* Do overlap_rec and irec overlap? */
	pnext = cr->overlap_rec.rm_startblock + cr->overlap_rec.rm_blockcount;
	if (pnext <= irec->rm_startblock)
		goto set_prev;

	/* Overlap is only allowed if both records are data fork mappings. */
	if (!xchk_rmapbt_is_shareable(bs->sc, &cr->overlap_rec) ||
	    !xchk_rmapbt_is_shareable(bs->sc, irec))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	/* Save whichever rmap record extends furthest. */
	inext = irec->rm_startblock + irec->rm_blockcount;
	if (pnext > inext)
		return;

set_prev:
	memcpy(&cr->overlap_rec, irec, sizeof(struct xfs_rmap_irec));
}

/* Decide if two reverse-mapping records can be merged. */
static inline bool
xchk_rmap_mergeable(
	struct xchk_rmap		*cr,
	const struct xfs_rmap_irec	*r2)
{
	const struct xfs_rmap_irec	*r1 = &cr->prev_rec;

	/* Ignore if prev_rec is not yet initialized. */
	if (cr->prev_rec.rm_blockcount == 0)
		return false;

	if (r1->rm_owner != r2->rm_owner)
		return false;
	if (r1->rm_startblock + r1->rm_blockcount != r2->rm_startblock)
		return false;
	if ((unsigned long long)r1->rm_blockcount + r2->rm_blockcount >
	    XFS_RMAP_LEN_MAX)
		return false;
	if (XFS_RMAP_NON_INODE_OWNER(r2->rm_owner))
		return true;
	/* must be an inode owner below here */
	if (r1->rm_flags != r2->rm_flags)
		return false;
	if (r1->rm_flags & XFS_RMAP_BMBT_BLOCK)
		return true;
	return r1->rm_offset + r1->rm_blockcount == r2->rm_offset;
}

/* Flag failures for records that could be merged. */
STATIC void
xchk_rmapbt_check_mergeable(
	struct xchk_btree		*bs,
	struct xchk_rmap		*cr,
	const struct xfs_rmap_irec	*irec)
{
	if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	if (xchk_rmap_mergeable(cr, irec))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	memcpy(&cr->prev_rec, irec, sizeof(struct xfs_rmap_irec));
}

/* Compare an rmap for AG metadata against the metadata walk. */
STATIC int
xchk_rmapbt_mark_bitmap(
	struct xchk_btree		*bs,
	struct xchk_rmap		*cr,
	const struct xfs_rmap_irec	*irec)
{
	struct xfs_scrub	*sc = bs->sc;
	struct xagb_bitmap	*bmp = NULL;
	xfs_extlen_t		fsbcount = irec->rm_blockcount;

	/*
	 * Skip corrupt records.  It is essential that we detect records in the
	 * btree that cannot overlap but do, flag those as CORRUPT, and skip
	 * the bitmap comparison to avoid generating false XCORRUPT reports.
	 */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	/*
	 * If the AG metadata walk didn't complete, there's no point in
	 * comparing against partial results.
	 */
	if (!cr->bitmaps_complete)
		return 0;

	switch (irec->rm_owner) {
	case XFS_RMAP_OWN_FS:
		bmp = &cr->fs_owned;
		break;
	case XFS_RMAP_OWN_LOG:
		bmp = &cr->log_owned;
		break;
	case XFS_RMAP_OWN_AG:
		bmp = &cr->ag_owned;
		break;
	case XFS_RMAP_OWN_INOBT:
		bmp = &cr->inobt_owned;
		break;
	case XFS_RMAP_OWN_REFC:
		bmp = &cr->refcbt_owned;
		break;
	}

	if (!bmp)
		return 0;

	if (xagb_bitmap_test(bmp, irec->rm_startblock, &fsbcount)) {
		/*
		 * The start of this reverse mapping corresponds to a set
		 * region in the bitmap.  If the mapping covers more area than
		 * the set region, then it covers space that wasn't found by
		 * the AG metadata walk.
		 */
		if (fsbcount < irec->rm_blockcount)
			xchk_btree_xref_set_corrupt(bs->sc,
					bs->sc->sa.rmap_cur, 0);
	} else {
		/*
		 * The start of this reverse mapping does not correspond to a
		 * completely set region in the bitmap.  The region wasn't
		 * fully set by walking the AG metadata, so this is a
		 * cross-referencing corruption.
		 */
		xchk_btree_xref_set_corrupt(bs->sc, bs->sc->sa.rmap_cur, 0);
	}

	/* Unset the region so that we can detect missing rmap records. */
	return xagb_bitmap_clear(bmp, irec->rm_startblock, irec->rm_blockcount);
}

/* Scrub an rmapbt record. */
STATIC int
xchk_rmapbt_rec(
	struct xchk_btree	*bs,
	const union xfs_btree_rec *rec)
{
	struct xchk_rmap	*cr = bs->private;
	struct xfs_rmap_irec	irec;

	if (xfs_rmap_btrec_to_irec(rec, &irec) != NULL ||
	    xfs_rmap_check_irec(bs->cur->bc_ag.pag, &irec) != NULL) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

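	/*
	 * Record decoded cleanly; check the node keys for stray unwritten
	 * bits, look for records that should have been merged or that
	 * overlap illegally, and cross-reference against the other AG
	 * structures.
	 */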
	xchk_rmapbt_check_unwritten_in_keyflags(bs);
	xchk_rmapbt_check_mergeable(bs, cr, &irec);
	xchk_rmapbt_check_overlapping(bs, cr, &irec);
	xchk_rmapbt_xref(bs->sc, &irec);

	return xchk_rmapbt_mark_bitmap(bs, cr, &irec);
}

/* Add an AGFL block to the rmap list. */
STATIC int
xchk_rmapbt_walk_agfl(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	struct xagb_bitmap	*bitmap = priv;

	return xagb_bitmap_set(bitmap, agbno, 1);
}

/*
 * Set up bitmaps mapping all the AG metadata to compare with the rmapbt
 * records.
 *
 * Grab our own btree cursors here if the scrub setup function didn't give us a
 * btree cursor due to reports of poor health.  We need to find out if the
 * rmapbt disagrees with primary metadata btrees to tag the rmapbt as being
 * XCORRUPT.
 */
STATIC int
xchk_rmapbt_walk_ag_metadata(
	struct xfs_scrub	*sc,
	struct xchk_rmap	*cr)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*agfl_bp;
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_btree_cur	*cur;
	int			error;

	/* OWN_FS: AG headers */
	error = xagb_bitmap_set(&cr->fs_owned, XFS_SB_BLOCK(mp),
			XFS_AGFL_BLOCK(mp) - XFS_SB_BLOCK(mp) + 1);
	if (error)
		goto out;

	/* OWN_LOG: Internal log */
	if (xfs_ag_contains_log(mp, sc->sa.pag->pag_agno)) {
		error = xagb_bitmap_set(&cr->log_owned,
				XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart),
				mp->m_sb.sb_logblocks);
		if (error)
			goto out;
	}

	/* OWN_AG: bnobt, cntbt, rmapbt, and AGFL */
	cur = sc->sa.bno_cur;
	if (!cur)
		cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
				sc->sa.pag);
	error = xagb_bitmap_set_btblocks(&cr->ag_owned, cur);
	if (cur != sc->sa.bno_cur)
		xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	cur = sc->sa.cnt_cur;
	if (!cur)
		cur = xfs_cntbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
				sc->sa.pag);
	error = xagb_bitmap_set_btblocks(&cr->ag_owned, cur);
	if (cur != sc->sa.cnt_cur)
		xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	error = xagb_bitmap_set_btblocks(&cr->ag_owned, sc->sa.rmap_cur);
	if (error)
		goto out;

	error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp);
	if (error)
		goto out;

	error = xfs_agfl_walk(sc->mp, agf, agfl_bp, xchk_rmapbt_walk_agfl,
			&cr->ag_owned);
	xfs_trans_brelse(sc->tp, agfl_bp);
	if (error)
		goto out;

	/* OWN_INOBT: inobt, finobt */
	cur = sc->sa.ino_cur;
	if (!cur)
		cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, sc->sa.agi_bp);
	error = xagb_bitmap_set_btblocks(&cr->inobt_owned, cur);
	if (cur != sc->sa.ino_cur)
		xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	if (xfs_has_finobt(sc->mp)) {
		cur = sc->sa.fino_cur;
		if (!cur)
			cur = xfs_finobt_init_cursor(sc->sa.pag, sc->tp,
					sc->sa.agi_bp);
		error = xagb_bitmap_set_btblocks(&cr->inobt_owned, cur);
		if (cur != sc->sa.fino_cur)
			xfs_btree_del_cursor(cur, error);
		if (error)
			goto out;
	}

	/* OWN_REFC: refcountbt */
	if (xfs_has_reflink(sc->mp)) {
		cur = sc->sa.refc_cur;
		if (!cur)
			cur = xfs_refcountbt_init_cursor(sc->mp, sc->tp,
					sc->sa.agf_bp, sc->sa.pag);
		error = xagb_bitmap_set_btblocks(&cr->refcbt_owned, cur);
		if (cur != sc->sa.refc_cur)
			xfs_btree_del_cursor(cur, error);
		if (error)
			goto out;
	}

out:
	/*
	 * If there's an error, set XFAIL and disable the bitmap
	 * cross-referencing checks, but proceed with the scrub anyway.
	 */
	if (error)
		xchk_btree_xref_process_error(sc, sc->sa.rmap_cur,
				sc->sa.rmap_cur->bc_nlevels - 1, &error);
	else
		cr->bitmaps_complete = true;
	return 0;
}

/*
 * Check for set regions in the bitmaps; if there are any, the rmap records do
 * not describe all the AG metadata.
 */
STATIC void
xchk_rmapbt_check_bitmaps(
	struct xfs_scrub	*sc,
	struct xchk_rmap	*cr)
{
	struct xfs_btree_cur	*cur = sc->sa.rmap_cur;
	unsigned int		level;

	if (sc->sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
				XFS_SCRUB_OFLAG_XFAIL))
		return;
	if (!cur)
		return;
	level = cur->bc_nlevels - 1;

	/*
	 * Any bitmap with bits still set indicates that the reverse mapping
	 * doesn't cover the entire primary structure.
	 */
	if (xagb_bitmap_hweight(&cr->fs_owned) != 0)
		xchk_btree_xref_set_corrupt(sc, cur, level);

	if (xagb_bitmap_hweight(&cr->log_owned) != 0)
		xchk_btree_xref_set_corrupt(sc, cur, level);

	if (xagb_bitmap_hweight(&cr->ag_owned) != 0)
		xchk_btree_xref_set_corrupt(sc, cur, level);

	if (xagb_bitmap_hweight(&cr->inobt_owned) != 0)
		xchk_btree_xref_set_corrupt(sc, cur, level);

	if (xagb_bitmap_hweight(&cr->refcbt_owned) != 0)
		xchk_btree_xref_set_corrupt(sc, cur, level);
}

/* Scrub the rmap btree for some AG. */
int
xchk_rmapbt(
	struct xfs_scrub	*sc)
{
	struct xchk_rmap	*cr;
	int			error;

	cr = kzalloc(sizeof(struct xchk_rmap), XCHK_GFP_FLAGS);
	if (!cr)
		return -ENOMEM;

	xagb_bitmap_init(&cr->fs_owned);
	xagb_bitmap_init(&cr->log_owned);
	xagb_bitmap_init(&cr->ag_owned);
	xagb_bitmap_init(&cr->inobt_owned);
	xagb_bitmap_init(&cr->refcbt_owned);

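	/* Build the bitmaps of AG metadata blocks before walking the rmapbt. */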
	error = xchk_rmapbt_walk_ag_metadata(sc, cr);
	if (error)
		goto out;

	error = xchk_btree(sc, sc->sa.rmap_cur, xchk_rmapbt_rec,
			&XFS_RMAP_OINFO_AG, cr);
	if (error)
		goto out;

	xchk_rmapbt_check_bitmaps(sc, cr);

out:
	xagb_bitmap_destroy(&cr->refcbt_owned);
	xagb_bitmap_destroy(&cr->inobt_owned);
	xagb_bitmap_destroy(&cr->ag_owned);
	xagb_bitmap_destroy(&cr->log_owned);
	xagb_bitmap_destroy(&cr->fs_owned);
	kfree(cr);
	return error;
}

/* xref check that the extent is owned only by a given owner */
void
xchk_xref_is_only_owned_by(
	struct xfs_scrub		*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo)
{
	struct xfs_rmap_matches		res;
	int				error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_count_owners(sc->sa.rmap_cur, bno, len, oinfo, &res);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;

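	/*
	 * We want exactly one rmap for this owner, and no mappings (valid or
	 * otherwise) attributed to anyone else.
	 */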
	if (res.matches != 1)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
	if (res.bad_non_owner_matches)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
	if (res.non_owner_matches)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* xref check that the extent is not owned by a given owner */
void
xchk_xref_is_not_owned_by(
	struct xfs_scrub		*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo)
{
	struct xfs_rmap_matches		res;
	int				error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_count_owners(sc->sa.rmap_cur, bno, len, oinfo, &res);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;

	if (res.matches != 0)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
	if (res.bad_non_owner_matches)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* xref check that the extent has no reverse mapping at all */
void
xchk_xref_has_no_owner(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	enum xbtree_recpacking	outcome;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_has_records(sc->sa.rmap_cur, bno, len, &outcome);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;

	if (outcome != XBTREE_RECPACKING_EMPTY)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}