// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
# include "xfs.h"
# include "xfs_fs.h"
# include "xfs_shared.h"
# include "xfs_format.h"
# include "xfs_trans_resv.h"
# include "xfs_mount.h"
2018-08-10 08:43:02 +03:00
# include "xfs_btree.h"
2022-11-07 04:03:16 +03:00
# include "scrub/scrub.h"
2018-07-30 08:37:09 +03:00
# include "scrub/bitmap.h"
2018-07-30 21:18:13 +03:00

/*
 * Set a range of this bitmap.  Caller must ensure the range is not set.
 *
 * This is the logical equivalent of bitmap |= mask(start, len).
 */
int
xbitmap_set(
	struct xbitmap		*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xbitmap_range	*bmr;

	bmr = kmalloc(sizeof(struct xbitmap_range), XCHK_GFP_FLAGS);
	if (!bmr)
		return -ENOMEM;

	INIT_LIST_HEAD(&bmr->list);
	bmr->start = start;
	bmr->len = len;
	list_add_tail(&bmr->list, &bitmap->list);

	return 0;
}
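
/*
 * Example (illustrative only, not called anywhere in this file): a typical
 * caller initializes the bitmap, marks a few non-overlapping ranges, and
 * destroys it when done.  The block numbers below are made up.
 *
 *	struct xbitmap	bm;
 *	int		error;
 *
 *	xbitmap_init(&bm);
 *	error = xbitmap_set(&bm, 100, 5);	(marks blocks 100-104)
 *	if (!error)
 *		error = xbitmap_set(&bm, 200, 2);	(marks blocks 200-201)
 *	xbitmap_destroy(&bm);
 */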

/* Free everything related to this bitmap. */
void
xbitmap_destroy(
	struct xbitmap		*bitmap)
{
	struct xbitmap_range	*bmr;
	struct xbitmap_range	*n;

	for_each_xbitmap_extent(bmr, n, bitmap) {
		list_del(&bmr->list);
		kfree(bmr);
	}
}

/* Set up a per-AG block bitmap. */
void
xbitmap_init(
	struct xbitmap		*bitmap)
{
	INIT_LIST_HEAD(&bitmap->list);
}

/* Compare two btree extents. */
static int
xbitmap_range_cmp(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xbitmap_range	*ap;
	struct xbitmap_range	*bp;

	ap = container_of(a, struct xbitmap_range, list);
	bp = container_of(b, struct xbitmap_range, list);

	if (ap->start > bp->start)
		return 1;
	if (ap->start < bp->start)
		return -1;
	return 0;
}

/*
 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
 *
 * The intent is that callers will iterate the rmapbt for all of its records
 * for a given owner to generate @bitmap; and iterate all the blocks of the
 * metadata structures that are not being rebuilt and have the same rmapbt
 * owner to generate @sub.  This routine subtracts all the extents
 * mentioned in @sub from all the extents linked in @bitmap, which leaves
 * @bitmap as the list of blocks that are not accounted for, which we assume
 * are the dead blocks of the old metadata structure.  The blocks mentioned in
 * @bitmap can be reaped.
 *
 * This is the logical equivalent of bitmap &= ~sub.
 */
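/*
 * Worked example (hypothetical extents): subtracting @sub = {[67, len 3]}
 * from @bitmap = {[64, len 10]} hits the "deleting from the middle" case
 * below, so @bitmap ends up as {[64, len 3], [70, len 4]}.  If @sub were
 * {[64, len 10]} instead, the overlap would be total and the extent would
 * simply be deleted from @bitmap.
 */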
#define LEFT_ALIGNED	(1 << 0)
#define RIGHT_ALIGNED	(1 << 1)
int
xbitmap_disunion(
	struct xbitmap		*bitmap,
	struct xbitmap		*sub)
{
	struct list_head	*lp;
	struct xbitmap_range	*br;
	struct xbitmap_range	*new_br;
	struct xbitmap_range	*sub_br;
	uint64_t		sub_start;
	uint64_t		sub_len;
	int			state;
	int			error = 0;

	if (list_empty(&bitmap->list) || list_empty(&sub->list))
		return 0;
	ASSERT(!list_empty(&sub->list));

	list_sort(NULL, &bitmap->list, xbitmap_range_cmp);
	list_sort(NULL, &sub->list, xbitmap_range_cmp);

	/*
	 * Now that we've sorted both lists, we iterate bitmap once, rolling
	 * forward through sub and/or bitmap as necessary until we find an
	 * overlap or reach the end of either list.  We do not reset lp to the
	 * head of bitmap nor do we reset sub_br to the head of sub.  The
	 * list traversal is similar to merge sort, but we're deleting
	 * instead.  In this manner we avoid O(n^2) operations.
	 */
	sub_br = list_first_entry(&sub->list, struct xbitmap_range,
			list);
	lp = bitmap->list.next;
	while (lp != &bitmap->list) {
		br = list_entry(lp, struct xbitmap_range, list);

		/*
		 * Advance sub_br and/or br until we find a pair that
		 * intersect or we run out of extents.
		 */
		while (sub_br->start + sub_br->len <= br->start) {
			if (list_is_last(&sub_br->list, &sub->list))
				goto out;
			sub_br = list_next_entry(sub_br, list);
		}
		if (sub_br->start >= br->start + br->len) {
			lp = lp->next;
			continue;
		}

		/* trim sub_br to fit the extent we have */
		sub_start = sub_br->start;
		sub_len = sub_br->len;
		if (sub_br->start < br->start) {
			sub_len -= br->start - sub_br->start;
			sub_start = br->start;
		}
		if (sub_len > br->len)
			sub_len = br->len;

		state = 0;
		if (sub_start == br->start)
			state |= LEFT_ALIGNED;
		if (sub_start + sub_len == br->start + br->len)
			state |= RIGHT_ALIGNED;
		switch (state) {
		case LEFT_ALIGNED:
			/* Coincides with only the left. */
			br->start += sub_len;
			br->len -= sub_len;
			break;
		case RIGHT_ALIGNED:
			/* Coincides with only the right. */
			br->len -= sub_len;
			lp = lp->next;
			break;
		case LEFT_ALIGNED | RIGHT_ALIGNED:
			/* Total overlap, just delete br. */
			lp = lp->next;
			list_del(&br->list);
			kfree(br);
			break;
		case 0:
			/*
			 * Deleting from the middle: add the new right extent
			 * and then shrink the left extent.
			 */
			new_br = kmalloc(sizeof(struct xbitmap_range),
					XCHK_GFP_FLAGS);
			if (!new_br) {
				error = -ENOMEM;
				goto out;
			}
			INIT_LIST_HEAD(&new_br->list);
			new_br->start = sub_start + sub_len;
			new_br->len = br->start + br->len - new_br->start;
			list_add(&new_br->list, &br->list);
			br->len = sub_start - br->start;
			lp = lp->next;
			break;
		default:
			ASSERT(0);
			break;
		}
	}

out:
	return error;
}
#undef LEFT_ALIGNED
#undef RIGHT_ALIGNED

/*
 * Record all btree blocks seen while iterating all records of a btree.
 *
 * We know that the btree query_all function starts at the left edge and walks
 * towards the right edge of the tree.  Therefore, we know that we can walk up
 * the btree cursor towards the root; if the pointer for a given level points
 * to the first record/key in that block, we haven't seen this block before;
 * and therefore we need to remember that we saw this block in the btree.
 *
 * So if our btree is:
 *
 *    4
 *  / | \
 * 1  2  3
 *
 * Pretend for this example that each leaf block has 100 btree records.  For
 * the first btree record, we'll observe that bc_levels[0].ptr == 1, so we
 * record that we saw block 1.  Then we observe that bc_levels[1].ptr == 1, so
 * we record block 4.  The list is [1, 4].
 *
 * For the second btree record, we see that bc_levels[0].ptr == 2, so we exit
 * the loop.  The list remains [1, 4].
 *
 * For the 101st btree record, we've moved onto leaf block 2.  Now
 * bc_levels[0].ptr == 1 again, so we record that we saw block 2.  We see that
 * bc_levels[1].ptr == 2, so we exit the loop.  The list is now [1, 4, 2].
 *
 * For the 102nd record, bc_levels[0].ptr == 2, so we continue.
 *
 * For the 201st record, we've moved on to leaf block 3.
 * bc_levels[0].ptr == 1, so we add 3 to the list.  Now it is [1, 4, 2, 3].
 *
 * For the 300th record we just exit, with the list being [1, 4, 2, 3].
 */

/*
 * Record all the buffers pointed to by the btree cursor.  Callers already
 * engaged in a btree walk should call this function to capture the list of
 * blocks going from the leaf towards the root.
 */
int
xbitmap_set_btcur_path(
	struct xbitmap		*bitmap,
	struct xfs_btree_cur	*cur)
{
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsb;
	int			i;
	int			error;

	for (i = 0; i < cur->bc_nlevels && cur->bc_levels[i].ptr == 1; i++) {
		xfs_btree_get_block(cur, i, &bp);
		if (!bp)
			continue;
		fsb = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
		error = xbitmap_set(bitmap, fsb, 1);
		if (error)
			return error;
	}

	return 0;
}
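
/*
 * Example (hypothetical caller): a repair function iterating btree records
 * could capture the cursor path at each record, which realizes the walk
 * described in the comment above.  The callback name here is illustrative
 * and not part of this file.
 *
 *	STATIC int
 *	xrep_example_visit_rec(
 *		struct xfs_btree_cur		*cur,
 *		const union xfs_btree_rec	*rec,
 *		void				*priv)
 *	{
 *		struct xbitmap		*bitmap = priv;
 *
 *		return xbitmap_set_btcur_path(bitmap, cur);
 *	}
 *
 * and then the caller would pass it to
 * xfs_btree_query_all(cur, xrep_example_visit_rec, &bitmap).
 */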

/* Collect a btree's block in the bitmap. */
STATIC int
xbitmap_collect_btblock(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xbitmap		*bitmap = priv;
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsbno;

	xfs_btree_get_block(cur, level, &bp);
	if (!bp)
		return 0;

	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
	return xbitmap_set(bitmap, fsbno, 1);
}

/* Walk the btree and mark the bitmap wherever a btree block is found. */
int
xbitmap_set_btblocks(
	struct xbitmap		*bitmap,
	struct xfs_btree_cur	*cur)
{
	return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock,
			XFS_BTREE_VISIT_ALL, bitmap);
}

/* How many bits are set in this bitmap? */
uint64_t
xbitmap_hweight(
	struct xbitmap		*bitmap)
{
	struct xbitmap_range	*bmr;
	struct xbitmap_range	*n;
	uint64_t		ret = 0;

	for_each_xbitmap_extent(bmr, n, bitmap)
		ret += bmr->len;

	return ret;
}
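
/*
 * Example (hypothetical values): because callers guarantee that set ranges
 * never overlap, the weight is simply the sum of the extent lengths.
 *
 *	xbitmap_set(&bm, 10, 4);
 *	xbitmap_set(&bm, 50, 6);
 *	weight = xbitmap_hweight(&bm);	(weight is now 10)
 */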