// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/* btree scrubbing */

/*
 * Check for btree operation errors.  See the section about handling
 * operational errors in common.c.
 */
static bool
__xchk_btree_process_error(
	struct xfs_scrub	*sc,
	struct xfs_btree_cur	*cur,
	int			level,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	if (*error == 0)
		return true;

	switch (*error) {
	case -EDEADLOCK:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		fallthrough;
	default:
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			trace_xchk_ifork_btree_op_error(sc, cur, level,
					*error, ret_ip);
		else
			trace_xchk_btree_op_error(sc, cur, level,
					*error, ret_ip);
		break;
	}
	return false;
}

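/*
 * Two thin wrappers follow: the regular scrub path records problems with
 * XFS_SCRUB_OFLAG_CORRUPT, while the cross-reference variant records them
 * with XFS_SCRUB_OFLAG_XFAIL so that a cross-referencing failure is
 * reported separately from corruption in the object being scrubbed.
 */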
bool
xchk_btree_process_error(
	struct xfs_scrub	*sc,
	struct xfs_btree_cur	*cur,
	int			level,
	int			*error)
{
	return __xchk_btree_process_error(sc, cur, level, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_btree_xref_process_error(
	struct xfs_scrub	*sc,
	struct xfs_btree_cur	*cur,
	int			level,
	int			*error)
{
	return __xchk_btree_process_error(sc, cur, level, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/* Record btree block corruption. */
static void
__xchk_btree_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_btree_cur	*cur,
	int			level,
	__u32			errflag,
	void			*ret_ip)
{
	sc->sm->sm_flags |= errflag;

	if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
		trace_xchk_ifork_btree_error(sc, cur, level,
				ret_ip);
	else
		trace_xchk_btree_error(sc, cur, level,
				ret_ip);
}

void
xchk_btree_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_btree_cur	*cur,
	int			level)
{
	__xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_CORRUPT,
			__return_address);
}

void
xchk_btree_xref_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_btree_cur	*cur,
	int			level)
{
	__xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_XCORRUPT,
			__return_address);
}

/*
 * Make sure this record is in order and doesn't stray outside of the parent
 * keys.
 */
STATIC void
xchk_btree_rec(
	struct xchk_btree	*bs)
{
	struct xfs_btree_cur	*cur = bs->cur;
	union xfs_btree_rec	*rec;
	union xfs_btree_key	key;
	union xfs_btree_key	hkey;
	union xfs_btree_key	*keyp;
	struct xfs_btree_block	*block;
	struct xfs_btree_block	*keyblock;
	struct xfs_buf		*bp;

	block = xfs_btree_get_block(cur, 0, &bp);
	rec = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);

	trace_xchk_btree_rec(bs->sc, cur, 0);

	/* If this isn't the first record, are they in order? */
	if (!bs->firstrec && !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec))
		xchk_btree_set_corrupt(bs->sc, cur, 0);
	bs->firstrec = false;
	memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len);

	if (cur->bc_nlevels == 1)
		return;

	/* Is this at least as large as the parent low key? */
	cur->bc_ops->init_key_from_rec(&key, rec);
	keyblock = xfs_btree_get_block(cur, 1, &bp);
	keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[1], keyblock);
	if (cur->bc_ops->diff_two_keys(cur, &key, keyp) < 0)
		xchk_btree_set_corrupt(bs->sc, cur, 1);

	if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
		return;

	/* Is this no larger than the parent high key? */
	cur->bc_ops->init_high_key_from_rec(&hkey, rec);
	keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[1], keyblock);
	if (cur->bc_ops->diff_two_keys(cur, keyp, &hkey) < 0)
		xchk_btree_set_corrupt(bs->sc, cur, 1);
}

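/*
 * Key checks mirror the record checks above, but are tracked per level:
 * bs->firstkey[] and bs->lastkey[] keep one entry for each level of the
 * btree being scrubbed.
 */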
/*
 * Make sure this key is in order and doesn't stray outside of the parent
 * keys.
 */
STATIC void
xchk_btree_key(
	struct xchk_btree	*bs,
	int			level)
{
	struct xfs_btree_cur	*cur = bs->cur;
	union xfs_btree_key	*key;
	union xfs_btree_key	*keyp;
	struct xfs_btree_block	*block;
	struct xfs_btree_block	*keyblock;
	struct xfs_buf		*bp;

	block = xfs_btree_get_block(cur, level, &bp);
	key = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);

	trace_xchk_btree_key(bs->sc, cur, level);

	/* If this isn't the first key, are they in order? */
	if (!bs->firstkey[level] &&
	    !cur->bc_ops->keys_inorder(cur, &bs->lastkey[level], key))
		xchk_btree_set_corrupt(bs->sc, cur, level);
	bs->firstkey[level] = false;
	memcpy(&bs->lastkey[level], key, cur->bc_ops->key_len);

	if (level + 1 >= cur->bc_nlevels)
		return;

	/* Is this at least as large as the parent low key? */
	keyblock = xfs_btree_get_block(cur, level + 1, &bp);
	keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
	if (cur->bc_ops->diff_two_keys(cur, key, keyp) < 0)
		xchk_btree_set_corrupt(bs->sc, cur, level);

	if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
		return;

	/* Is this no larger than the parent high key? */
	key = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
	keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
	if (cur->bc_ops->diff_two_keys(cur, keyp, key) < 0)
		xchk_btree_set_corrupt(bs->sc, cur, level);
}

/*
 * Check a btree pointer.  Returns true if it's ok to use this pointer.
 * Callers do not need to set the corrupt flag.
 */
static bool
xchk_btree_ptr_ok(
	struct xchk_btree	*bs,
	int			level,
	union xfs_btree_ptr	*ptr)
{
	bool			res;

	/* A btree rooted in an inode has no block pointer to the root. */
	if ((bs->cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == bs->cur->bc_nlevels)
		return true;

	/* Otherwise, check the pointers. */
	if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS)
		res = xfs_btree_check_lptr(bs->cur, be64_to_cpu(ptr->l), level);
	else
		res = xfs_btree_check_sptr(bs->cur, be32_to_cpu(ptr->s), level);
	if (!res)
		xchk_btree_set_corrupt(bs->sc, bs->cur, level);

	return res;
}

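/*
 * Sibling checks work by duplicating the cursor, moving the copy to the
 * parent slot adjacent to this block, and comparing the parent's pointer
 * there against this block's sibling pointer.  A null sibling pointer
 * means there should be no adjacent parent slot to move to at all.
 */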
/* Check that a btree block's sibling matches what we expect it to be. */
STATIC int
xchk_btree_block_check_sibling(
	struct xchk_btree	*bs,
	int			level,
	int			direction,
	union xfs_btree_ptr	*sibling)
{
	struct xfs_btree_cur	*cur = bs->cur;
	struct xfs_btree_block	*pblock;
	struct xfs_buf		*pbp;
	struct xfs_btree_cur	*ncur = NULL;
	union xfs_btree_ptr	*pp;
	int			success;
	int			error;

	error = xfs_btree_dup_cursor(cur, &ncur);
	if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error) ||
	    !ncur)
		return error;

	/*
	 * If the pointer is null, we shouldn't be able to move the upper
	 * level pointer anywhere.
	 */
	if (xfs_btree_ptr_is_null(cur, sibling)) {
		if (direction > 0)
			error = xfs_btree_increment(ncur, level + 1, &success);
		else
			error = xfs_btree_decrement(ncur, level + 1, &success);
		if (error == 0 && success)
			xchk_btree_set_corrupt(bs->sc, cur, level);
		error = 0;
		goto out;
	}

	/* Move the upper level pointer in the given direction. */
	if (direction > 0)
		error = xfs_btree_increment(ncur, level + 1, &success);
	else
		error = xfs_btree_decrement(ncur, level + 1, &success);
	if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error))
		goto out;
	if (!success) {
		xchk_btree_set_corrupt(bs->sc, cur, level + 1);
		goto out;
	}

	/* Compare upper level pointer to sibling pointer. */
	pblock = xfs_btree_get_block(ncur, level + 1, &pbp);
	pp = xfs_btree_ptr_addr(ncur, ncur->bc_ptrs[level + 1], pblock);
	if (!xchk_btree_ptr_ok(bs, level + 1, pp))
		goto out;
	if (pbp)
		xchk_buffer_recheck(bs->sc, pbp);

	if (xfs_btree_diff_two_ptrs(cur, pp, sibling))
		xchk_btree_set_corrupt(bs->sc, cur, level);
out:
	xfs_btree_del_cursor(ncur, XFS_BTREE_ERROR);
	return error;
}

/* Check the siblings of a btree block. */
STATIC int
xchk_btree_block_check_siblings(
	struct xchk_btree	*bs,
	struct xfs_btree_block	*block)
{
	struct xfs_btree_cur	*cur = bs->cur;
	union xfs_btree_ptr	leftsib;
	union xfs_btree_ptr	rightsib;
	int			level;
	int			error = 0;

	xfs_btree_get_sibling(cur, block, &leftsib, XFS_BB_LEFTSIB);
	xfs_btree_get_sibling(cur, block, &rightsib, XFS_BB_RIGHTSIB);
	level = xfs_btree_get_level(block);

	/* Root block should never have siblings. */
	if (level == cur->bc_nlevels - 1) {
		if (!xfs_btree_ptr_is_null(cur, &leftsib) ||
		    !xfs_btree_ptr_is_null(cur, &rightsib))
			xchk_btree_set_corrupt(bs->sc, cur, level);
		goto out;
	}

	/*
	 * Do the left and right sibling pointers match the adjacent
	 * parent level pointers?
	 * (These functions absorb error codes for us.)
	 */
	error = xchk_btree_block_check_sibling(bs, level, -1, &leftsib);
	if (error)
		return error;
	error = xchk_btree_block_check_sibling(bs, level, 1, &rightsib);
	if (error)
		return error;
out:
	return error;
}

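/*
 * Deferred ownership check for a single btree block.  While scrubbing the
 * bnobt or rmapbt we can't cross-reference blocks against the tree being
 * walked, so those block addresses are queued on bs->to_check and checked
 * after the walk completes (see xchk_btree_check_owner and xchk_btree).
 */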
struct check_owner {
	struct list_head	list;
	xfs_daddr_t		daddr;
	int			level;
};

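/*
 * Note on AG state: a long-pointer (inode-rooted) btree is not confined to
 * one AG, so no AG headers are attached to the scrub context when we get
 * here; in that case the function below sets up, and afterwards frees, the
 * AG state for whichever AG holds the block being checked.
 */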
/*
 * Make sure this btree block isn't in the free list and that there's
 * an rmap record for it.
 */
STATIC int
xchk_btree_check_block_owner(
	struct xchk_btree	*bs,
	int			level,
	xfs_daddr_t		daddr)
{
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_btnum_t		btnum;
	bool			init_sa;
	int			error = 0;

	if (!bs->cur)
		return 0;

	btnum = bs->cur->bc_btnum;
	agno = xfs_daddr_to_agno(bs->cur->bc_mp, daddr);
	agbno = xfs_daddr_to_agbno(bs->cur->bc_mp, daddr);

	init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS;
	if (init_sa) {
		error = xchk_ag_init_existing(bs->sc, agno, &bs->sc->sa);
		if (!xchk_btree_xref_process_error(bs->sc, bs->cur,
				level, &error))
			return error;
	}

	xchk_xref_is_used_space(bs->sc, agbno, 1);
	/*
	 * The bnobt scrubber aliases bs->cur to bs->sc->sa.bno_cur, so we
	 * have to nullify it (to shut down further block owner checks) if
	 * self-xref encounters problems.
	 */
	if (!bs->sc->sa.bno_cur && btnum == XFS_BTNUM_BNO)
		bs->cur = NULL;

	xchk_xref_is_owned_by(bs->sc, agbno, 1, bs->oinfo);
	if (!bs->sc->sa.rmap_cur && btnum == XFS_BTNUM_RMAP)
		bs->cur = NULL;

	if (init_sa)
		xchk_ag_free(bs->sc, &bs->sc->sa);

	return error;
}

/* Check the owner of a btree block. */
STATIC int
xchk_btree_check_owner(
	struct xchk_btree	*bs,
	int			level,
	struct xfs_buf		*bp)
{
	struct xfs_btree_cur	*cur = bs->cur;
	struct check_owner	*co;

	/*
	 * In theory, xfs_btree_get_block should only give us a null buffer
	 * pointer for the root of a root-in-inode btree type, but we need
	 * to check defensively here in case the cursor state is also screwed
	 * up.
	 */
	if (bp == NULL) {
		if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE))
			xchk_btree_set_corrupt(bs->sc, bs->cur, level);
		return 0;
	}

	/*
	 * We want to cross-reference each btree block with the bnobt
	 * and the rmapbt.  We cannot cross-reference the bnobt or
	 * rmapbt while scanning the bnobt or rmapbt, respectively,
	 * because we cannot alter the cursor and we'd prefer not to
	 * duplicate cursors.  Therefore, save the buffer daddr for
	 * later scanning.
	 */
	if (cur->bc_btnum == XFS_BTNUM_BNO || cur->bc_btnum == XFS_BTNUM_RMAP) {
		co = kmem_alloc(sizeof(struct check_owner),
				KM_MAYFAIL);
		if (!co)
			return -ENOMEM;
		co->level = level;
		co->daddr = xfs_buf_daddr(bp);
		list_add_tail(&co->list, &bs->to_check);
		return 0;
	}

	return xchk_btree_check_block_owner(bs, level, xfs_buf_daddr(bp));
}

/* Decide if we want to check minrecs of a btree block in the inode root. */
static inline bool
xchk_btree_check_iroot_minrecs(
	struct xchk_btree	*bs)
{
	/*
	 * xfs_bmap_add_attrfork_btree had an implementation bug wherein it
	 * would miscalculate the space required for the data fork bmbt root
	 * when adding an attr fork, and promote the iroot contents to an
	 * external block unnecessarily.  This went unnoticed for many years
	 * until scrub found filesystems in this state.  Inode rooted btrees
	 * are not supposed to have immediate child blocks that are small
	 * enough that the contents could fit in the inode root, but we can't
	 * fail existing filesystems, so instead we disable the check for
	 * data fork bmap btrees when there's an attr fork.
	 */
	if (bs->cur->bc_btnum == XFS_BTNUM_BMAP &&
	    bs->cur->bc_ino.whichfork == XFS_DATA_FORK &&
	    XFS_IFORK_Q(bs->sc->ip))
		return false;

	return true;
}

/*
 * Check that this btree block has at least minrecs records or is one of the
 * special blocks that don't require that.
 */
STATIC void
xchk_btree_check_minrecs(
	struct xchk_btree	*bs,
	int			level,
	struct xfs_btree_block	*block)
{
	struct xfs_btree_cur	*cur = bs->cur;
	unsigned int		root_level = cur->bc_nlevels - 1;
	unsigned int		numrecs = be16_to_cpu(block->bb_numrecs);

	/* More records than minrecs means the block is ok. */
	if (numrecs >= cur->bc_ops->get_minrecs(cur, level))
		return;

	/*
	 * For btrees rooted in the inode, it's possible that the root block
	 * contents spilled into a regular ondisk block because there wasn't
	 * enough space in the inode root.  The number of records in that
	 * child block might be less than the standard minrecs, but that's ok
	 * provided that there's only one direct child of the root.
	 */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 2) {
		struct xfs_btree_block	*root_block;
		struct xfs_buf		*root_bp;
		int			root_maxrecs;

		root_block = xfs_btree_get_block(cur, root_level, &root_bp);
		root_maxrecs = cur->bc_ops->get_dmaxrecs(cur, root_level);
		if (xchk_btree_check_iroot_minrecs(bs) &&
		    (be16_to_cpu(root_block->bb_numrecs) != 1 ||
		     numrecs <= root_maxrecs))
			xchk_btree_set_corrupt(bs->sc, cur, level);
		return;
	}

	/*
	 * Otherwise, only the root level is allowed to have fewer than
	 * minrecs records or keyptrs.
	 */
	if (level < root_level)
		xchk_btree_set_corrupt(bs->sc, cur, level);
}

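/*
 * Each block we visit gets the full treatment below: a structural header
 * check via __xfs_btree_check_lblock/__xfs_btree_check_sblock, a re-check
 * of the buffer via xchk_buffer_recheck, and the minrecs, owner, and
 * sibling checks defined above.
 */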
/*
 * Grab and scrub a btree block given a btree pointer.  Returns block
 * and buffer pointers (if applicable) if they're ok to use.
 */
STATIC int
xchk_btree_get_block(
	struct xchk_btree	*bs,
	int			level,
	union xfs_btree_ptr	*pp,
	struct xfs_btree_block	**pblock,
	struct xfs_buf		**pbp)
{
	xfs_failaddr_t		failed_at;
	int			error;

	*pblock = NULL;
	*pbp = NULL;
	error = xfs_btree_lookup_get_block(bs->cur, level, pp, pblock);
	if (!xchk_btree_process_error(bs->sc, bs->cur, level, &error) ||
	    !*pblock)
		return error;

	xfs_btree_get_block(bs->cur, level, pbp);
	if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS)
		failed_at = __xfs_btree_check_lblock(bs->cur, *pblock,
				level, *pbp);
	else
		failed_at = __xfs_btree_check_sblock(bs->cur, *pblock,
				level, *pbp);
	if (failed_at) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, level);
		return 0;
	}
	if (*pbp)
		xchk_buffer_recheck(bs->sc, *pbp);

	xchk_btree_check_minrecs(bs, level, *pblock);

	/*
	 * Check the block's owner; this function absorbs error codes
	 * for us.
	 */
	error = xchk_btree_check_owner(bs, level, *pbp);
	if (error)
		return error;

	/*
	 * Check the block's siblings; this function absorbs error codes
	 * for us.
	 */
	return xchk_btree_block_check_siblings(bs, *pblock);
}

/*
 * Check that the low and high keys of this block match the keys stored
 * in the parent block.
 */
STATIC void
xchk_btree_block_keys(
	struct xchk_btree	*bs,
	int			level,
	struct xfs_btree_block	*block)
{
	union xfs_btree_key	block_keys;
	struct xfs_btree_cur	*cur = bs->cur;
	union xfs_btree_key	*high_bk;
	union xfs_btree_key	*parent_keys;
	union xfs_btree_key	*high_pk;
	struct xfs_btree_block	*parent_block;
	struct xfs_buf		*bp;

	if (level >= cur->bc_nlevels - 1)
		return;

	/* Calculate the keys for this block. */
	xfs_btree_get_keys(cur, block, &block_keys);

	/* Obtain the parent's copy of the keys for this block. */
	parent_block = xfs_btree_get_block(cur, level + 1, &bp);
	parent_keys = xfs_btree_key_addr(cur, cur->bc_ptrs[level + 1],
			parent_block);

	if (cur->bc_ops->diff_two_keys(cur, &block_keys, parent_keys) != 0)
		xchk_btree_set_corrupt(bs->sc, cur, 1);

	if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
		return;

	/* Get high keys */
	high_bk = xfs_btree_high_key_from_key(cur, &block_keys);
	high_pk = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level + 1],
			parent_block);

	if (cur->bc_ops->diff_two_keys(cur, high_bk, high_pk) != 0)
		xchk_btree_set_corrupt(bs->sc, cur, 1);
}

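/*
 * The walk below is an iterative depth-first traversal of the btree:
 * cur->bc_ptrs[level] tracks the current position at each level, records
 * are processed at level zero, and finishing a block pops back up to the
 * parent and advances the parent's pointer.
 */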
/*
 * Visit all nodes and leaves of a btree.  Check that all pointers and
 * records are in order, that the keys reflect the records, and use a callback
 * so that the caller can verify individual records.
 */
int
xchk_btree(
	struct xfs_scrub		*sc,
	struct xfs_btree_cur		*cur,
	xchk_btree_rec_fn		scrub_fn,
	const struct xfs_owner_info	*oinfo,
	void				*private)
{
	struct xchk_btree		bs = {
		.cur			= cur,
		.scrub_rec		= scrub_fn,
		.oinfo			= oinfo,
		.firstrec		= true,
		.private		= private,
		.sc			= sc,
	};
	union xfs_btree_ptr		ptr;
	union xfs_btree_ptr		*pp;
	union xfs_btree_rec		*recp;
	struct xfs_btree_block		*block;
	int				level;
	struct xfs_buf			*bp;
	struct check_owner		*co;
	struct check_owner		*n;
	int				i;
	int				error = 0;

	/* Initialize scrub state */
	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++)
		bs.firstkey[i] = true;
	INIT_LIST_HEAD(&bs.to_check);

	/* Don't try to check a tree with a height we can't handle. */
	if (cur->bc_nlevels > XFS_BTREE_MAXLEVELS) {
		xchk_btree_set_corrupt(sc, cur, 0);
		goto out;
	}

	/*
	 * Load the root of the btree.  The helper function absorbs
	 * error codes for us.
	 */
	level = cur->bc_nlevels - 1;
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	if (!xchk_btree_ptr_ok(&bs, cur->bc_nlevels, &ptr))
		goto out;
	error = xchk_btree_get_block(&bs, level, &ptr, &block, &bp);
	if (error || !block)
		goto out;

	cur->bc_ptrs[level] = 1;

	while (level < cur->bc_nlevels) {
		block = xfs_btree_get_block(cur, level, &bp);

		if (level == 0) {
			/* End of leaf, pop back towards the root. */
			if (cur->bc_ptrs[level] >
			    be16_to_cpu(block->bb_numrecs)) {
				xchk_btree_block_keys(&bs, level, block);
				if (level < cur->bc_nlevels - 1)
					cur->bc_ptrs[level + 1]++;
				level++;
				continue;
			}

			/* Records in order for scrub? */
			xchk_btree_rec(&bs);

			/* Call out to the record checker. */
			recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
			error = bs.scrub_rec(&bs, recp);
			if (error)
				break;

			if (xchk_should_terminate(sc, &error) ||
			    (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
				break;

			cur->bc_ptrs[level]++;
			continue;
		}

		/* End of node, pop back towards the root. */
		if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
			xchk_btree_block_keys(&bs, level, block);
			if (level < cur->bc_nlevels - 1)
				cur->bc_ptrs[level + 1]++;
			level++;
			continue;
		}

		/* Keys in order for scrub? */
		xchk_btree_key(&bs, level);

		/* Drill another level deeper. */
		pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);
		if (!xchk_btree_ptr_ok(&bs, level, pp)) {
			cur->bc_ptrs[level]++;
			continue;
		}
		level--;
		error = xchk_btree_get_block(&bs, level, pp, &block, &bp);
		if (error || !block)
			goto out;

		cur->bc_ptrs[level] = 1;
	}

out:
	/* Process deferred owner checks on btree blocks. */
	list_for_each_entry_safe(co, n, &bs.to_check, list) {
		if (!error && bs.cur)
			error = xchk_btree_check_block_owner(&bs,
					co->level, co->daddr);
		list_del(&co->list);
		kmem_free(co);
	}

	return error;
}
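
/*
 * Usage sketch (illustrative only; real callers live in the other
 * scrub/*.c files, and xchk_example_rec is a made-up name): a scrubber
 * supplies a record callback and owner info, then walks the tree:
 *
 *	STATIC int
 *	xchk_example_rec(struct xchk_btree *bs, union xfs_btree_rec *rec)
 *	{
 *		// Decode *rec; flag problems with xchk_btree_set_corrupt().
 *		return 0;
 *	}
 *
 *	error = xchk_btree(sc, cur, xchk_example_rec, &XFS_RMAP_OINFO_AG,
 *			NULL);
 */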