// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
# include "xfs.h"
# include "xfs_fs.h"
# include "xfs_shared.h"
# include "xfs_format.h"
# include "xfs_trans_resv.h"
# include "xfs_mount.h"
# include "xfs_btree.h"
# include "xfs_log_format.h"
# include "xfs_trans.h"
# include "xfs_sb.h"
# include "xfs_inode.h"
# include "xfs_alloc.h"
# include "xfs_alloc_btree.h"
# include "xfs_ialloc.h"
# include "xfs_ialloc_btree.h"
# include "xfs_rmap.h"
# include "xfs_rmap_btree.h"
# include "xfs_refcount_btree.h"
# include "xfs_extent_busy.h"
# include "xfs_ag.h"
# include "xfs_ag_resv.h"
# include "xfs_quota.h"
# include "xfs_qm.h"
2023-08-10 07:48:02 -07:00
# include "xfs_bmap.h"
2023-08-10 07:48:04 -07:00
# include "xfs_da_format.h"
# include "xfs_da_btree.h"
# include "xfs_attr.h"
# include "xfs_attr_remote.h"
2023-12-06 18:41:00 -08:00
# include "xfs_defer.h"
2023-08-10 07:48:01 -07:00
# include "scrub/scrub.h"
# include "scrub/common.h"
# include "scrub/trace.h"
# include "scrub/repair.h"
# include "scrub/bitmap.h"
# include "scrub/reap.h"
/*
 * Disposal of Blocks from Old Metadata
 *
 * Now that we've constructed a new btree to replace the damaged one, we want
 * to dispose of the blocks that (we think) the old btree was using.
 * Previously, we used the rmapbt to collect the extents (bitmap) with the
 * rmap owner corresponding to the tree we rebuilt, collected extents for any
 * blocks with the same rmap owner that are owned by another data structure
 * (sublist), and subtracted sublist from bitmap.  In theory the extents
 * remaining in bitmap are the old btree's blocks.
 *
 * Unfortunately, it's possible that the btree was crosslinked with other
 * blocks on disk.  The rmap data can tell us if there are multiple owners, so
 * if the rmapbt says there is an owner of this block other than @oinfo, then
 * the block is crosslinked.  Remove the reverse mapping and continue.
 *
 * If there is one rmap record, we can free the block, which removes the
 * reverse mapping but doesn't add the block to the free space.  Our repair
 * strategy is to hope the other metadata objects crosslinked on this block
 * will be rebuilt (atop different blocks), thereby removing all the cross
 * links.
 *
 * If there are no rmap records at all, we also free the block.  If the btree
 * being rebuilt lives in the free space (bnobt/cntbt/rmapbt) then there isn't
 * supposed to be a rmap record and everything is ok.  For other btrees there
 * had to have been an rmap entry for the block to have ended up on @bitmap,
 * so if it's gone now there's something wrong and the fs will shut down.
 *
 * Note: If there are multiple rmap records with only the same rmap owner as
 * the btree we're trying to rebuild and the block is indeed owned by another
 * data structure with the same rmap owner, then the block will be in sublist
 * and therefore doesn't need disposal.  If there are multiple rmap records
 * with only the same rmap owner but the block is not owned by something with
 * the same rmap owner, the block will be freed.
 *
 * The caller is responsible for locking the AG headers for the entire rebuild
 * operation so that nothing else can sneak in and change the AG state while
 * we're not looking.  We must also invalidate any buffers associated with
 * @bitmap.
 */
/* Information about reaping extents after a repair. */
2023-08-10 07:48:04 -07:00
struct xreap_state {
2023-08-10 07:48:01 -07:00
struct xfs_scrub * sc ;
/* Reverse mapping owner and metadata reservation type. */
const struct xfs_owner_info * oinfo ;
enum xfs_ag_resv_type resv ;
2023-08-10 07:48:02 -07:00
2023-08-10 07:48:04 -07:00
/* If true, roll the transaction before reaping the next extent. */
bool force_roll ;
2023-08-10 07:48:02 -07:00
/* Number of deferred reaps attached to the current transaction. */
unsigned int deferred ;
2023-08-10 07:48:04 -07:00
/* Number of invalidated buffers logged to the current transaction. */
unsigned int invalidated ;
/* Number of deferred reaps queued during the whole reap sequence. */
unsigned long long total_deferred ;
2023-08-10 07:48:01 -07:00
} ;
2023-08-10 07:48:02 -07:00
/* Put a block back on the AGFL. */
2023-08-10 07:48:01 -07:00
STATIC int
2023-08-10 07:48:04 -07:00
xreap_put_freelist (
2023-08-10 07:48:01 -07:00
struct xfs_scrub * sc ,
xfs_agblock_t agbno )
{
struct xfs_buf * agfl_bp ;
int error ;
/* Make sure there's space on the freelist. */
error = xrep_fix_freelist ( sc , true ) ;
if ( error )
return error ;
/*
* Since we ' re " freeing " a lost block onto the AGFL , we have to
* create an rmap for the block prior to merging it or else other
* parts will break .
*/
error = xfs_rmap_alloc ( sc - > tp , sc - > sa . agf_bp , sc - > sa . pag , agbno , 1 ,
& XFS_RMAP_OINFO_AG ) ;
if ( error )
return error ;
/* Put the block on the AGFL. */
error = xfs_alloc_read_agfl ( sc - > sa . pag , sc - > tp , & agfl_bp ) ;
if ( error )
return error ;
error = xfs_alloc_put_freelist ( sc - > sa . pag , sc - > tp , sc - > sa . agf_bp ,
agfl_bp , agbno , 0 ) ;
if ( error )
return error ;
xfs_extent_busy_insert ( sc - > tp , sc - > sa . pag , agbno , 1 ,
XFS_EXTENT_BUSY_SKIP_DISCARD ) ;
return 0 ;
}
2023-08-10 07:48:04 -07:00
/* Are there any uncommitted reap operations? */
static inline bool xreap_dirty ( const struct xreap_state * rs )
{
if ( rs - > force_roll )
return true ;
if ( rs - > deferred )
return true ;
if ( rs - > invalidated )
return true ;
if ( rs - > total_deferred )
return true ;
return false ;
}
# define XREAP_MAX_BINVAL (2048)
/*
* Decide if we want to roll the transaction after reaping an extent . We don ' t
* want to overrun the transaction reservation , so we prohibit more than
* 128 EFIs per transaction . For the same reason , we limit the number
* of buffer invalidations to 2048.
*/
static inline bool xreap_want_roll ( const struct xreap_state * rs )
{
if ( rs - > force_roll )
return true ;
if ( rs - > deferred > XREP_MAX_ITRUNCATE_EFIS )
return true ;
if ( rs - > invalidated > XREAP_MAX_BINVAL )
return true ;
return false ;
}
static inline void xreap_reset ( struct xreap_state * rs )
{
rs - > total_deferred + = rs - > deferred ;
rs - > deferred = 0 ;
rs - > invalidated = 0 ;
rs - > force_roll = false ;
}
# define XREAP_MAX_DEFER_CHAIN (2048)
/*
* Decide if we want to finish the deferred ops that are attached to the scrub
* transaction . We don ' t want to queue huge chains of deferred ops because
* that can consume a lot of log space and kernel memory . Hence we trigger a
* xfs_defer_finish if there are more than 2048 deferred reap operations or the
* caller did some real work .
*/
static inline bool
xreap_want_defer_finish ( const struct xreap_state * rs )
{
if ( rs - > force_roll )
return true ;
if ( rs - > total_deferred > XREAP_MAX_DEFER_CHAIN )
return true ;
return false ;
}
static inline void xreap_defer_finish_reset ( struct xreap_state * rs )
{
rs - > total_deferred = 0 ;
rs - > deferred = 0 ;
rs - > invalidated = 0 ;
rs - > force_roll = false ;
}
/* Try to invalidate the incore buffers for an extent that we're freeing. */
2023-08-10 07:48:02 -07:00
STATIC void
2023-08-10 07:48:04 -07:00
xreap_agextent_binval (
struct xreap_state * rs ,
xfs_agblock_t agbno ,
xfs_extlen_t * aglenp )
2023-08-10 07:48:02 -07:00
{
2023-08-10 07:48:04 -07:00
struct xfs_scrub * sc = rs - > sc ;
struct xfs_perag * pag = sc - > sa . pag ;
struct xfs_mount * mp = sc - > mp ;
xfs_agnumber_t agno = sc - > sa . pag - > pag_agno ;
xfs_agblock_t agbno_next = agbno + * aglenp ;
xfs_agblock_t bno = agbno ;
2023-08-10 07:48:02 -07:00
/*
* Avoid invalidating AG headers and post - EOFS blocks because we never
* own those .
*/
2023-08-10 07:48:04 -07:00
if ( ! xfs_verify_agbno ( pag , agbno ) | |
! xfs_verify_agbno ( pag , agbno_next - 1 ) )
2023-08-10 07:48:02 -07:00
return ;
/*
2023-08-10 07:48:04 -07:00
* If there are incore buffers for these blocks , invalidate them . We
* assume that the lack of any other known owners means that the buffer
* can be locked without risk of deadlocking . The buffer cache cannot
* detect aliasing , so employ nested loops to scan for incore buffers
* of any plausible size .
2023-08-10 07:48:02 -07:00
*/
2023-08-10 07:48:04 -07:00
while ( bno < agbno_next ) {
xfs_agblock_t fsbcount ;
xfs_agblock_t max_fsbs ;
/*
* Max buffer size is the max remote xattr buffer size , which
* is one fs block larger than 64 k .
*/
max_fsbs = min_t ( xfs_agblock_t , agbno_next - bno ,
xfs_attr3_rmt_blocks ( mp , XFS_XATTR_SIZE_MAX ) ) ;
xfs: fix an off-by-one error in xreap_agextent_binval
Overall, this function tries to find and invalidate all buffers for a
given extent of space on the data device. The inner for loop in this
function tries to find all xfs_bufs for a given daddr. The lengths of
all possible cached buffers range from 1 fsblock to the largest needed
to contain a 64k xattr value (~17fsb). The scan is capped to avoid
looking at anything buffer going past the given extent.
Unfortunately, the loop continuation test is wrong -- max_fsbs is the
largest size we want to scan, not one past that. Put another way, this
loop is actually 1-indexed, not 0-indexed. Therefore, the continuation
test should use <=, not <.
As a result, online repairs of btree blocks fails to stale any buffers
for btrees that are being torn down, which causes later assertions in
the buffer cache when another thread creates a different-sized buffer.
This happens in xfs/709 when allocating an inode cluster buffer:
------------[ cut here ]------------
WARNING: CPU: 0 PID: 3346128 at fs/xfs/xfs_message.c:104 assfail+0x3a/0x40 [xfs]
CPU: 0 PID: 3346128 Comm: fsstress Not tainted 6.7.0-rc4-djwx #rc4
RIP: 0010:assfail+0x3a/0x40 [xfs]
Call Trace:
<TASK>
_xfs_buf_obj_cmp+0x4a/0x50
xfs_buf_get_map+0x191/0xba0
xfs_trans_get_buf_map+0x136/0x280
xfs_ialloc_inode_init+0x186/0x340
xfs_ialloc_ag_alloc+0x254/0x720
xfs_dialloc+0x21f/0x870
xfs_create_tmpfile+0x1a9/0x2f0
xfs_rename+0x369/0xfd0
xfs_vn_rename+0xfa/0x170
vfs_rename+0x5fb/0xc30
do_renameat2+0x52d/0x6e0
__x64_sys_renameat2+0x4b/0x60
do_syscall_64+0x3b/0xe0
entry_SYSCALL_64_after_hwframe+0x46/0x4e
A later refactoring patch in the online repair series fixed this by
accident, which is why I didn't notice this until I started testing only
the patches that are likely to end up in 6.8.
Fixes: 1c7ce115e521 ("xfs: reap large AG metadata extents when possible")
Signed-off-by: "Darrick J. Wong" <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
2023-12-14 13:38:45 -08:00
for ( fsbcount = 1 ; fsbcount < = max_fsbs ; fsbcount + + ) {
2023-08-10 07:48:04 -07:00
struct xfs_buf * bp = NULL ;
xfs_daddr_t daddr ;
int error ;
daddr = XFS_AGB_TO_DADDR ( mp , agno , bno ) ;
error = xfs_buf_incore ( mp - > m_ddev_targp , daddr ,
XFS_FSB_TO_BB ( mp , fsbcount ) ,
XBF_LIVESCAN , & bp ) ;
if ( error )
continue ;
xfs_trans_bjoin ( sc - > tp , bp ) ;
xfs_trans_binval ( sc - > tp , bp ) ;
rs - > invalidated + + ;
/*
* Stop invalidating if we ' ve hit the limit ; we should
* still have enough reservation left to free however
* far we ' ve gotten .
*/
if ( rs - > invalidated > XREAP_MAX_BINVAL ) {
* aglenp - = agbno_next - bno ;
goto out ;
}
}
bno + + ;
}
2023-08-10 07:48:02 -07:00
2023-08-10 07:48:04 -07:00
out :
trace_xreap_agextent_binval ( sc - > sa . pag , agbno , * aglenp ) ;
2023-08-10 07:48:02 -07:00
}
2023-08-10 07:48:04 -07:00
/*
* Figure out the longest run of blocks that we can dispose of with a single
* call . Cross - linked blocks should have their reverse mappings removed , but
* single - owner extents can be freed . AGFL blocks can only be put back one at
* a time .
*/
2023-08-10 07:48:01 -07:00
STATIC int
2023-08-10 07:48:04 -07:00
xreap_agextent_select (
struct xreap_state * rs ,
xfs_agblock_t agbno ,
xfs_agblock_t agbno_next ,
bool * crosslinked ,
xfs_extlen_t * aglenp )
2023-08-10 07:48:01 -07:00
{
2023-08-10 07:48:04 -07:00
struct xfs_scrub * sc = rs - > sc ;
struct xfs_btree_cur * cur ;
xfs_agblock_t bno = agbno + 1 ;
xfs_extlen_t len = 1 ;
int error ;
2023-08-10 07:48:01 -07:00
2023-08-10 07:48:04 -07:00
/*
* Determine if there are any other rmap records covering the first
* block of this extent . If so , the block is crosslinked .
*/
cur = xfs_rmapbt_init_cursor ( sc - > mp , sc - > tp , sc - > sa . agf_bp ,
sc - > sa . pag ) ;
error = xfs_rmap_has_other_keys ( cur , agbno , 1 , rs - > oinfo ,
crosslinked ) ;
if ( error )
goto out_cur ;
2023-08-10 07:48:01 -07:00
2023-08-10 07:48:04 -07:00
/* AGFL blocks can only be deal with one at a time. */
if ( rs - > resv = = XFS_AG_RESV_AGFL )
goto out_found ;
2023-08-10 07:48:02 -07:00
2023-08-10 07:48:04 -07:00
/*
* Figure out how many of the subsequent blocks have the same crosslink
* status .
*/
while ( bno < agbno_next ) {
bool also_crosslinked ;
2023-08-10 07:48:01 -07:00
2023-08-10 07:48:04 -07:00
error = xfs_rmap_has_other_keys ( cur , bno , 1 , rs - > oinfo ,
& also_crosslinked ) ;
if ( error )
goto out_cur ;
if ( * crosslinked ! = also_crosslinked )
break ;
len + + ;
bno + + ;
}
out_found :
* aglenp = len ;
trace_xreap_agextent_select ( sc - > sa . pag , agbno , len , * crosslinked ) ;
out_cur :
2023-08-10 07:48:01 -07:00
xfs_btree_del_cursor ( cur , error ) ;
2023-08-10 07:48:04 -07:00
return error ;
}
/*
 * Dispose of as much of the beginning of this AG extent as possible.  The
 * number of blocks disposed of will be returned in @aglenp.
 */
STATIC int
xreap_agextent_iter(
	struct xreap_state	*rs,
	xfs_agblock_t		agbno,
	xfs_extlen_t		*aglenp,
	bool			crosslinked)
{
	struct xfs_scrub	*sc = rs->sc;
	xfs_fsblock_t		fsbno;
	int			error = 0;

	fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.pag->pag_agno, agbno);

	/*
	 * If there are other rmappings, this block is cross linked and must
	 * not be freed.  Remove the reverse mapping and move on.  Otherwise,
	 * we were the only owner of the block, so free the extent, which will
	 * also remove the rmap.
	 *
	 * XXX: XFS doesn't support detecting the case where a single block
	 * metadata structure is crosslinked with a multi-block structure
	 * because the buffer cache doesn't detect aliasing problems, so we
	 * can't fix 100% of crosslinking problems (yet).  The verifiers will
	 * blow on writeout, the filesystem will shut down, and the admin gets
	 * to run xfs_repair.
	 */
	if (crosslinked) {
		trace_xreap_dispose_unmap_extent(sc->sa.pag, agbno, *aglenp);

		rs->force_roll = true;
		return xfs_rmap_free(sc->tp, sc->sa.agf_bp, sc->sa.pag, agbno,
				*aglenp, rs->oinfo);
	}

	trace_xreap_dispose_free_extent(sc->sa.pag, agbno, *aglenp);

	/*
	 * Invalidate as many buffers as we can, starting at agbno.  If this
	 * function sets *aglenp to zero, the transaction is full of logged
	 * buffer invalidations, so we need to return early so that we can
	 * roll and retry.
	 */
	xreap_agextent_binval(rs, agbno, aglenp);
	if (*aglenp == 0) {
		ASSERT(xreap_want_roll(rs));
		return 0;
	}

	/* Put blocks back on the AGFL one at a time. */
	if (rs->resv == XFS_AG_RESV_AGFL) {
		ASSERT(*aglenp == 1);
		error = xreap_put_freelist(sc, agbno);
		if (error)
			return error;

		rs->force_roll = true;
		return 0;
	}

	/*
	 * Use deferred frees to get rid of the old btree blocks to try to
	 * minimize the window in which we could crash and lose the old
	 * blocks.  Add a defer ops barrier every other extent to avoid
	 * stressing the system with large EFIs.
	 */
	error = xfs_free_extent_later(sc->tp, fsbno, *aglenp, rs->oinfo,
			rs->resv, true);
	if (error)
		return error;

	rs->deferred++;
	if (rs->deferred % 2 == 0)
		xfs_defer_add_barrier(sc->tp);
	return 0;
}
/*
 * Break an AG metadata extent into sub-extents by fate (crosslinked, not
 * crosslinked), and dispose of each sub-extent separately.  Rolls or
 * defer-finishes the transaction as the reap state counters demand.
 */
STATIC int
xreap_agmeta_extent(
	uint32_t		agbno,
	uint32_t		len,
	void			*priv)
{
	struct xreap_state	*rs = priv;
	struct xfs_scrub	*sc = rs->sc;
	xfs_agblock_t		agbno_next = agbno + len;
	int			error = 0;

	ASSERT(len <= XFS_MAX_BMBT_EXTLEN);
	ASSERT(sc->ip == NULL);

	while (agbno < agbno_next) {
		xfs_extlen_t	aglen;
		bool		crosslinked;

		error = xreap_agextent_select(rs, agbno, agbno_next,
				&crosslinked, &aglen);
		if (error)
			return error;

		error = xreap_agextent_iter(rs, agbno, &aglen, crosslinked);
		if (error)
			return error;

		if (xreap_want_defer_finish(rs)) {
			error = xrep_defer_finish(sc);
			if (error)
				return error;
			xreap_defer_finish_reset(rs);
		} else if (xreap_want_roll(rs)) {
			error = xrep_roll_ag_trans(sc);
			if (error)
				return error;
			xreap_reset(rs);
		}

		agbno += aglen;
	}

	return 0;
}
2023-08-10 07:48:04 -07:00
/* Dispose of every block of every AG metadata extent in the bitmap. */
2023-08-10 07:48:01 -07:00
int
2023-08-10 07:48:04 -07:00
xrep_reap_agblocks (
2023-08-10 07:48:01 -07:00
struct xfs_scrub * sc ,
2023-08-10 07:48:04 -07:00
struct xagb_bitmap * bitmap ,
2023-08-10 07:48:01 -07:00
const struct xfs_owner_info * oinfo ,
enum xfs_ag_resv_type type )
{
2023-08-10 07:48:04 -07:00
struct xreap_state rs = {
2023-08-10 07:48:01 -07:00
. sc = sc ,
. oinfo = oinfo ,
. resv = type ,
} ;
2023-08-10 07:48:02 -07:00
int error ;
2023-08-10 07:48:01 -07:00
ASSERT ( xfs_has_rmapbt ( sc - > mp ) ) ;
2023-08-10 07:48:04 -07:00
ASSERT ( sc - > ip = = NULL ) ;
2023-08-10 07:48:01 -07:00
2023-08-10 07:48:04 -07:00
error = xagb_bitmap_walk ( bitmap , xreap_agmeta_extent , & rs ) ;
2023-08-10 07:48:04 -07:00
if ( error )
2023-08-10 07:48:02 -07:00
return error ;
2023-08-10 07:48:04 -07:00
if ( xreap_dirty ( & rs ) )
return xrep_defer_finish ( sc ) ;
return 0 ;
2023-08-10 07:48:01 -07:00
}