// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/dabtree.h"
#include "scrub/attr.h"
/*
 * Allocate enough memory to hold an attr value and attr block bitmaps,
 * reallocating the buffer if necessary.  Buffer contents are not preserved
 * across a reallocation.
 *
 * @sc:		scrub context; the buffer is cached in sc->buf.
 * @value_size:	minimum bytes needed to hold an xattr value.
 * @flags:	kmem allocation flags (e.g. KM_MAYFAIL).
 *
 * Returns 0 on success or -ENOMEM if the allocation fails.
 */
int
xchk_setup_xattr_buf(
	struct xfs_scrub	*sc,
	size_t			value_size,
	xfs_km_flags_t		flags)
{
	size_t			sz;
	struct xchk_xattr_buf	*ab = sc->buf;

	/*
	 * We need enough space to read an xattr value from the file or enough
	 * space to hold three copies of the xattr free space bitmap.  We don't
	 * need the buffer space for both purposes at the same time.
	 */
	sz = 3 * sizeof(long) * BITS_TO_LONGS(sc->mp->m_attr_geo->blksize);
	sz = max_t(size_t, sz, value_size);

	/*
	 * If there's already a buffer, figure out if we need to reallocate it
	 * to accommodate a larger size.
	 */
	if (ab) {
		if (sz <= ab->sz)
			return 0;
		/* Too small; free it and fall through to allocate a new one. */
		kmem_free(ab);
		sc->buf = NULL;
	}

	/*
	 * Don't zero the buffer upon allocation to avoid runtime overhead.
	 * All users must be careful never to read uninitialized contents.
	 */
	ab = kmem_alloc_large(sizeof(*ab) + sz, flags);
	if (!ab)
		return -ENOMEM;

	ab->sz = sz;
	sc->buf = ab;
	return 0;
}
/* Set us up to scrub an inode's extended attributes. */
int
xchk_setup_xattr (
struct xfs_scrub * sc ,
struct xfs_inode * ip )
{
int error ;
2019-07-05 20:29:56 +03:00
/*
* We failed to get memory while checking attrs , so this time try to
* get all the memory we ' re ever going to need . Allocate the buffer
* without the inode lock held , which means we can sleep .
*/
if ( sc - > flags & XCHK_TRY_HARDER ) {
2019-08-26 22:06:22 +03:00
error = xchk_setup_xattr_buf ( sc , XATTR_SIZE_MAX , 0 ) ;
2019-07-05 20:29:56 +03:00
if ( error )
return error ;
}
2019-07-05 20:29:55 +03:00
2018-07-19 22:29:11 +03:00
return xchk_setup_inode_contents ( sc , ip , 0 ) ;
2017-10-18 07:37:45 +03:00
}
/* Extended Attributes */

/*
 * Per-scrub state for the xattr walk: embeds the attr list iterator
 * context so the listent callback can recover the scrub context via
 * container_of().
 */
struct xchk_xattr {
	struct xfs_attr_list_context	context;	/* attr iterator state */
	struct xfs_scrub		*sc;		/* scrub context */
};
/*
 * Check that an extended attribute key can be looked up by hash.
 *
 * We use the XFS attribute list iterator (i.e. xfs_attr_list_ilocked)
 * to call this function for every attribute key in an inode.  Once
 * we're here, we load the attribute value to see if any errors happen,
 * or if we get more or less data than we expected.
 */
static void
xchk_xattr_listent(
	struct xfs_attr_list_context	*context,
	int				flags,
	unsigned char			*name,
	int				namelen,
	int				valuelen)
{
	struct xchk_xattr		*sx;
	struct xfs_da_args		args = { NULL };
	int				error = 0;

	sx = container_of(context, struct xchk_xattr, context);

	if (xchk_should_terminate(sx->sc, &error)) {
		/* Smuggle the termination error out through seen_enough. */
		context->seen_enough = error;
		return;
	}

	if (flags & XFS_ATTR_INCOMPLETE) {
		/* Incomplete attr key, just mark the inode for preening. */
		xchk_ino_set_preen(sx->sc, context->dp->i_ino);
		return;
	}

	/* Does this name make sense? */
	if (!xfs_attr_namecheck(name, namelen)) {
		xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, args.blkno);
		return;
	}

	/*
	 * Try to allocate enough memory to extract the attr value.  If that
	 * doesn't work, we overload the seen_enough variable to convey
	 * the error code back to the main scrub function.  -ENOMEM becomes
	 * -EDEADLOCK so that the scrub is retried with XCHK_TRY_HARDER.
	 */
	error = xchk_setup_xattr_buf(sx->sc, valuelen, KM_MAYFAIL);
	if (error == -ENOMEM)
		error = -EDEADLOCK;
	if (error) {
		context->seen_enough = error;
		return;
	}

	args.op_flags = XFS_DA_OP_NOTIME;
	args.attr_filter = flags & XFS_ATTR_NSP_ONDISK_MASK;
	args.geo = context->dp->i_mount->m_attr_geo;
	args.whichfork = XFS_ATTR_FORK;
	args.dp = context->dp;
	args.name = name;
	args.namelen = namelen;
	args.hashval = xfs_da_hashname(args.name, args.namelen);
	args.trans = context->tp;
	args.value = xchk_xattr_valuebuf(sx->sc);
	args.valuelen = valuelen;

	error = xfs_attr_get_ilocked(&args);
	/* ENODATA means the hash lookup failed and the attr is bad */
	if (error == -ENODATA)
		error = -EFSCORRUPTED;
	if (!xchk_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
			&error))
		goto fail_xref;
	/* Value length mismatch between lookup and iteration is corruption. */
	if (args.valuelen != valuelen)
		xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
					     args.blkno);
fail_xref:
	if (sx->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		context->seen_enough = 1;
	return;
}
2017-10-31 22:10:02 +03:00
/*
* Mark a range [ start , start + len ) in this map . Returns true if the
* region was free , and false if there ' s a conflict or a problem .
*
* Within a char , the lowest bit of the char represents the byte with
* the smallest address
*/
STATIC bool
2018-07-19 22:29:11 +03:00
xchk_xattr_set_map (
2018-07-19 22:29:12 +03:00
struct xfs_scrub * sc ,
2018-07-19 22:29:12 +03:00
unsigned long * map ,
unsigned int start ,
unsigned int len )
2017-10-31 22:10:02 +03:00
{
2018-07-19 22:29:12 +03:00
unsigned int mapsize = sc - > mp - > m_attr_geo - > blksize ;
bool ret = true ;
2017-10-31 22:10:02 +03:00
if ( start > = mapsize )
return false ;
if ( start + len > mapsize ) {
len = mapsize - start ;
ret = false ;
}
if ( find_next_bit ( map , mapsize , start ) < start + len )
ret = false ;
bitmap_set ( map , start , len ) ;
return ret ;
}
/*
 * Check the leaf freemap from the usage bitmap.  Returns false if the
 * attr freemap has problems or points to used space.
 */
STATIC bool
xchk_xattr_check_freemap(
	struct xfs_scrub		*sc,
	unsigned long			*map,
	struct xfs_attr3_icleaf_hdr	*leafhdr)
{
	/* freemap/dstmap are scratch bitmaps carved out of the scrub buffer. */
	unsigned long			*freemap = xchk_xattr_freemap(sc);
	unsigned long			*dstmap = xchk_xattr_dstmap(sc);
	unsigned int			mapsize = sc->mp->m_attr_geo->blksize;
	int				i;

	/* Construct bitmap of freemap contents. */
	bitmap_zero(freemap, mapsize);
	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
		/* Overlapping or out-of-range free regions are corrupt. */
		if (!xchk_xattr_set_map(sc, freemap,
				leafhdr->freemap[i].base,
				leafhdr->freemap[i].size))
			return false;
	}

	/* Look for bits that are set in freemap and are marked in use. */
	return bitmap_and(dstmap, freemap, map, mapsize) == 0;
}
/*
 * Check this leaf entry's relations to everything else.
 * Returns the number of bytes used for the name/value data
 * via *usedbytes (accumulated only while no corruption is flagged).
 */
STATIC void
xchk_xattr_entry(
	struct xchk_da_btree		*ds,
	int				level,
	char				*buf_end,
	struct xfs_attr_leafblock	*leaf,
	struct xfs_attr3_icleaf_hdr	*leafhdr,
	struct xfs_attr_leaf_entry	*ent,
	int				idx,
	unsigned int			*usedbytes,
	__u32				*last_hashval)
{
	struct xfs_mount		*mp = ds->state->mp;
	unsigned long			*usedmap = xchk_xattr_usedmap(ds->sc);
	char				*name_end;
	struct xfs_attr_leaf_name_local	*lentry;
	struct xfs_attr_leaf_name_remote *rentry;
	unsigned int			nameidx;
	unsigned int			namesize;

	/* Padding must be zeroed on disk. */
	if (ent->pad2 != 0)
		xchk_da_set_corrupt(ds, level);

	/* Hash values in order? */
	if (be32_to_cpu(ent->hashval) < *last_hashval)
		xchk_da_set_corrupt(ds, level);
	*last_hashval = be32_to_cpu(ent->hashval);

	/* Name offset must fall within the used part of the block. */
	nameidx = be16_to_cpu(ent->nameidx);
	if (nameidx < leafhdr->firstused ||
	    nameidx >= mp->m_attr_geo->blksize) {
		xchk_da_set_corrupt(ds, level);
		return;
	}

	/* Check the name information. */
	if (ent->flags & XFS_ATTR_LOCAL) {
		lentry = xfs_attr3_leaf_name_local(leaf, idx);
		namesize = xfs_attr_leaf_entsize_local(lentry->namelen,
				be16_to_cpu(lentry->valuelen));
		name_end = (char *)lentry + namesize;
		if (lentry->namelen == 0)
			xchk_da_set_corrupt(ds, level);
	} else {
		rentry = xfs_attr3_leaf_name_remote(leaf, idx);
		namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
		name_end = (char *)rentry + namesize;
		if (rentry->namelen == 0 || rentry->valueblk == 0)
			xchk_da_set_corrupt(ds, level);
	}
	/* Name/value data must not run off the end of the block. */
	if (name_end > buf_end)
		xchk_da_set_corrupt(ds, level);

	/* The name/value region must not overlap anything already marked. */
	if (!xchk_xattr_set_map(ds->sc, usedmap, nameidx, namesize))
		xchk_da_set_corrupt(ds, level);

	if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		*usedbytes += namesize;
}
/*
 * Scrub an attribute leaf: padding, header sanity, every entry, and the
 * free-space map.  Repeat visits to the same block (tracked through
 * ds->private) are skipped.  Always returns 0 or a negative errno;
 * corruption is reported through xchk_da_set_corrupt.
 */
STATIC int
xchk_xattr_block(
	struct xchk_da_btree		*ds,
	int				level)
{
	struct xfs_attr3_icleaf_hdr	leafhdr;
	struct xfs_mount		*mp = ds->state->mp;
	struct xfs_da_state_blk		*blk = &ds->state->path.blk[level];
	struct xfs_buf			*bp = blk->bp;
	xfs_dablk_t			*last_checked = ds->private;
	struct xfs_attr_leafblock	*leaf = bp->b_addr;
	struct xfs_attr_leaf_entry	*ent;
	struct xfs_attr_leaf_entry	*entries;
	unsigned long			*usedmap;
	char				*buf_end;
	size_t				off;
	__u32				last_hashval = 0;
	unsigned int			usedbytes = 0;
	unsigned int			hdrsize;
	int				i;
	int				error;

	/* Already checked this block?  Don't repeat the work. */
	if (*last_checked == blk->blkno)
		return 0;

	/* Allocate memory for block usage checking. */
	error = xchk_setup_xattr_buf(ds->sc, 0, KM_MAYFAIL);
	if (error == -ENOMEM)
		return -EDEADLOCK;	/* retry the scrub with TRY_HARDER */
	if (error)
		return error;
	usedmap = xchk_xattr_usedmap(ds->sc);

	*last_checked = blk->blkno;
	bitmap_zero(usedmap, mp->m_attr_geo->blksize);

	/* Check all the padding. */
	if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb)) {
		struct xfs_attr3_leafblock	*leaf = bp->b_addr;

		if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 ||
		    leaf->hdr.info.hdr.pad != 0)
			xchk_da_set_corrupt(ds, level);
	} else {
		if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0)
			xchk_da_set_corrupt(ds, level);
	}

	/* Check the leaf header */
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
	hdrsize = xfs_attr3_leaf_hdr_size(leaf);

	if (leafhdr.usedbytes > mp->m_attr_geo->blksize)
		xchk_da_set_corrupt(ds, level);
	if (leafhdr.firstused > mp->m_attr_geo->blksize)
		xchk_da_set_corrupt(ds, level);
	if (leafhdr.firstused < hdrsize)
		xchk_da_set_corrupt(ds, level);
	/* Mark the header region as used. */
	if (!xchk_xattr_set_map(ds->sc, usedmap, 0, hdrsize))
		xchk_da_set_corrupt(ds, level);

	if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Entry table must not overlap the name/value area. */
	entries = xfs_attr3_leaf_entryp(leaf);
	if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused)
		xchk_da_set_corrupt(ds, level);

	buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
	for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) {
		/* Mark the leaf entry itself. */
		off = (char *)ent - (char *)leaf;
		if (!xchk_xattr_set_map(ds->sc, usedmap, off,
				sizeof(xfs_attr_leaf_entry_t))) {
			xchk_da_set_corrupt(ds, level);
			goto out;
		}

		/* Check the entry and nameval. */
		xchk_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
				ent, i, &usedbytes, &last_hashval);

		if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			goto out;
	}

	/* Free space must not overlap anything we marked as used. */
	if (!xchk_xattr_check_freemap(ds->sc, usedmap, &leafhdr))
		xchk_da_set_corrupt(ds, level);

	/* Header's usedbytes must match what we actually counted. */
	if (leafhdr.usedbytes != usedbytes)
		xchk_da_set_corrupt(ds, level);

out:
	return 0;
}
/* Scrub an attribute btree record. */
STATIC int
xchk_xattr_rec(
	struct xchk_da_btree		*ds,
	int				level)
{
	struct xfs_mount		*mp = ds->state->mp;
	struct xfs_da_state_blk		*blk = &ds->state->path.blk[level];
	struct xfs_attr_leaf_name_local	*lentry;
	struct xfs_attr_leaf_name_remote	*rentry;
	struct xfs_buf			*bp;
	struct xfs_attr_leaf_entry	*ent;
	xfs_dahash_t			calc_hash;
	xfs_dahash_t			hash;
	int				nameidx;
	int				hdrsize;
	unsigned int			badflags;
	int				error;

	ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);

	ent = xfs_attr3_leaf_entryp(blk->bp->b_addr) + blk->index;

	/* Check the whole block, if necessary. */
	error = xchk_xattr_block(ds, level);
	if (error)
		goto out;
	if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Check the hash of the entry. */
	error = xchk_da_btree_hash(ds, level, &ent->hashval);
	if (error)
		goto out;

	/* Find the attr entry's location. */
	bp = blk->bp;
	hdrsize = xfs_attr3_leaf_hdr_size(bp->b_addr);
	nameidx = be16_to_cpu(ent->nameidx);
	if (nameidx < hdrsize || nameidx >= mp->m_attr_geo->blksize) {
		xchk_da_set_corrupt(ds, level);
		goto out;
	}

	/* Retrieve the entry and check it. */
	hash = be32_to_cpu(ent->hashval);
	/* Only these flag bits may appear on disk. */
	badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE |
			XFS_ATTR_INCOMPLETE);
	if ((ent->flags & badflags) != 0)
		xchk_da_set_corrupt(ds, level);
	if (ent->flags & XFS_ATTR_LOCAL) {
		lentry = (struct xfs_attr_leaf_name_local *)
				(((char *)bp->b_addr) + nameidx);
		if (lentry->namelen <= 0) {
			xchk_da_set_corrupt(ds, level);
			goto out;
		}
		calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen);
	} else {
		rentry = (struct xfs_attr_leaf_name_remote *)
				(((char *)bp->b_addr) + nameidx);
		if (rentry->namelen <= 0) {
			xchk_da_set_corrupt(ds, level);
			goto out;
		}
		calc_hash = xfs_da_hashname(rentry->name, rentry->namelen);
	}
	/* Recomputed name hash must match the one stored in the entry. */
	if (calc_hash != hash)
		xchk_da_set_corrupt(ds, level);

out:
	return error;
}
/* Scrub the extended attribute metadata. */
int
xchk_xattr(
	struct xfs_scrub	*sc)
{
	struct xchk_xattr	sx;
	xfs_dablk_t		last_checked = -1U;
	int			error = 0;

	if (!xfs_inode_hasattr(sc->ip))
		return -ENOENT;

	memset(&sx, 0, sizeof(sx));
	/* Check attribute tree structure */
	error = xchk_da_btree(sc, XFS_ATTR_FORK, xchk_xattr_rec,
			&last_checked);
	if (error)
		goto out;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Check that every attr key can also be looked up by hash. */
	sx.context.dp = sc->ip;
	sx.context.resynch = 1;
	sx.context.put_listent = xchk_xattr_listent;
	sx.context.tp = sc->tp;
	sx.context.allow_incomplete = true;
	sx.sc = sc;

	/*
	 * Look up every xattr in this file by name.
	 *
	 * Use the backend implementation of xfs_attr_list to call
	 * xchk_xattr_listent on every attribute key in this inode.
	 * In other words, we use the same iterator/callback mechanism
	 * that listattr uses to scrub extended attributes, though in our
	 * _listent function, we check the value of the attribute.
	 *
	 * The VFS only locks i_rwsem when modifying attrs, so keep all
	 * three locks held because that's the only way to ensure we're
	 * the only thread poking into the da btree.  We traverse the da
	 * btree while holding a leaf buffer locked for the xattr name
	 * iteration, which doesn't really follow the usual buffer
	 * locking order.
	 */
	error = xfs_attr_list_ilocked(&sx.context);
	if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
		goto out;

	/* Did our listent function try to return any errors? */
	if (sx.context.seen_enough < 0)
		error = sx.context.seen_enough;
out:
	return error;
}