// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "darray.h"
#include "dirent.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "inode.h"
#include "keylist.h"
#include "subvolume.h"
#include "super.h"
#include "xattr.h"

#include <linux/bsearch.h>
#include <linux/dcache.h> /* struct qstr */

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
/*
 * XXX: this is handling transaction restarts without returning
 * -BCH_ERR_transaction_restart_nested, this is not how we do things anymore:
 */
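/* Sum the sizes of all allocated extents belonging to @inum in @snapshot: */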
static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum,
				    u32 snapshot)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 sectors = 0;
	int ret;

	for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
				SPOS(inum, 0, snapshot),
				POS(inum, U64_MAX),
				0, k, ret)
		if (bkey_extent_is_allocation(k.k))
			sectors += k.k->size;

	bch2_trans_iter_exit(trans, &iter);

	return ret ?: sectors;
}
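/* Count the DT_DIR dirents in directory @inum in @snapshot: */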
static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum,
			      u32 snapshot)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_s_c_dirent d;
	u64 subdirs = 0;
	int ret;

	for_each_btree_key_upto(trans, iter, BTREE_ID_dirents,
				SPOS(inum, 0, snapshot),
				POS(inum, U64_MAX),
				0, k, ret) {
		if (k.k->type != KEY_TYPE_dirent)
			continue;

		d = bkey_s_c_to_dirent(k);
		if (d.v->d_type == DT_DIR)
			subdirs++;
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret ?: subdirs;
}
static int __snapshot_lookup_subvol(struct btree_trans *trans, u32 snapshot,
				    u32 *subvol)
{
	struct bch_snapshot s;
	int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots,
					  POS(0, snapshot), 0,
					  snapshot, &s);
	if (!ret)
		*subvol = le32_to_cpu(s.subvol);
	else if (bch2_err_matches(ret, ENOENT))
		bch_err(trans->c, "snapshot %u not found", snapshot);

	return ret;
}
static int __subvol_lookup(struct btree_trans *trans, u32 subvol,
			   u32 *snapshot, u64 *inum)
{
	struct bch_subvolume s;
	int ret;

	ret = bch2_subvolume_get(trans, subvol, false, 0, &s);

	*snapshot = le32_to_cpu(s.snapshot);
	*inum = le64_to_cpu(s.inode);
	return ret;
}

static int subvol_lookup(struct btree_trans *trans, u32 subvol,
			 u32 *snapshot, u64 *inum)
{
	return lockrestart_do(trans, __subvol_lookup(trans, subvol, snapshot, inum));
}
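/* Find the first inode key (in any snapshot) with number @inode_nr: */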
static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
			      struct bch_inode_unpacked *inode)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
			     POS(0, inode_nr),
			     BTREE_ITER_ALL_SNAPSHOTS);
	k = bch2_btree_iter_peek(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
		ret = -BCH_ERR_ENOENT_inode;
		goto err;
	}

	ret = bch2_inode_unpack(k, inode);
err:
	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
		bch_err(trans->c, "error fetching inode %llu: %s",
			inode_nr, bch2_err_str(ret));
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
static int __lookup_inode(struct btree_trans *trans, u64 inode_nr,
			  struct bch_inode_unpacked *inode,
			  u32 *snapshot)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
			       SPOS(0, inode_nr, *snapshot), 0);
	ret = bkey_err(k);
	if (ret)
		goto err;

	ret = bkey_is_inode(k.k)
		? bch2_inode_unpack(k, inode)
		: -BCH_ERR_ENOENT_inode;
	if (!ret)
		*snapshot = iter.pos.snapshot;
err:
	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
		bch_err(trans->c, "error fetching inode %llu:%u: %s",
			inode_nr, *snapshot, bch2_err_str(ret));
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int lookup_inode(struct btree_trans *trans, u64 inode_nr,
			struct bch_inode_unpacked *inode,
			u32 *snapshot)
{
	return lockrestart_do(trans, __lookup_inode(trans, inode_nr, inode, snapshot));
}
static int __lookup_dirent(struct btree_trans *trans,
			   struct bch_hash_info hash_info,
			   subvol_inum dir, struct qstr *name,
			   u64 *target, unsigned *type)
{
	struct btree_iter iter;
	struct bkey_s_c_dirent d;
	int ret;

	ret = bch2_hash_lookup(trans, &iter, bch2_dirent_hash_desc,
			       &hash_info, dir, name, 0);
	if (ret)
		return ret;

	d = bkey_s_c_to_dirent(bch2_btree_iter_peek_slot(&iter));
	*target = le64_to_cpu(d.v->d_inum);
	*type = d.v->d_type;
	bch2_trans_iter_exit(trans, &iter);
	return 0;
}
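/* Pack @inode and write it back at @snapshot: */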
static int __write_inode(struct btree_trans *trans,
			 struct bch_inode_unpacked *inode,
			 u32 snapshot)
{
	struct bkey_inode_buf *inode_p =
		bch2_trans_kmalloc(trans, sizeof(*inode_p));

	if (IS_ERR(inode_p))
		return PTR_ERR(inode_p);

	bch2_inode_pack(inode_p, inode);
	inode_p->inode.k.p.snapshot = snapshot;

	return bch2_btree_insert_nonextent(trans, BTREE_ID_inodes,
				&inode_p->inode.k_i,
				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
}
static int write_inode(struct btree_trans *trans,
		       struct bch_inode_unpacked *inode,
		       u32 snapshot)
{
	int ret = commit_do(trans, NULL, NULL,
			    BTREE_INSERT_NOFAIL|
			    BTREE_INSERT_LAZY_RW,
			    __write_inode(trans, inode, snapshot));
	if (ret)
		bch_err(trans->c, "error in fsck: error updating inode: %s",
			bch2_err_str(ret));
	return ret;
}
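/* Delete the dirent at @pos, using its directory's hash info: */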
static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bch_inode_unpacked dir_inode;
	struct bch_hash_info dir_hash_info;
	int ret;

	ret = lookup_first_inode(trans, pos.inode, &dir_inode);
	if (ret)
		goto err;

	dir_hash_info = bch2_hash_info_init(c, &dir_inode);

	bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_INTENT);

	ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
				  &dir_hash_info, &iter,
				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
	bch2_trans_iter_exit(trans, &iter);
err:
	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
		bch_err_fn(c, ret);
	return ret;
}
/* Get lost+found, create if it doesn't exist: */
static int lookup_lostfound(struct btree_trans *trans, u32 subvol,
			    struct bch_inode_unpacked *lostfound)
{
	struct bch_fs *c = trans->c;
	struct bch_inode_unpacked root;
	struct bch_hash_info root_hash_info;
	struct qstr lostfound_str = QSTR("lost+found");
	subvol_inum root_inum = { .subvol = subvol };
	u64 inum = 0;
	unsigned d_type = 0;
	u32 snapshot;
	int ret;

	ret = __subvol_lookup(trans, subvol, &snapshot, &root_inum.inum);
	if (ret)
		return ret;

	ret = __lookup_inode(trans, root_inum.inum, &root, &snapshot);
	if (ret)
		return ret;

	root_hash_info = bch2_hash_info_init(c, &root);

	ret = __lookup_dirent(trans, root_hash_info, root_inum,
			      &lostfound_str, &inum, &d_type);
	if (bch2_err_matches(ret, ENOENT)) {
		bch_notice(c, "creating lost+found");
		goto create_lostfound;
	}

	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
		bch_err(c, "error looking up lost+found: %s", bch2_err_str(ret));
	if (ret)
		return ret;

	if (d_type != DT_DIR) {
		bch_err(c, "error looking up lost+found: not a directory");
		return ret;
	}

	/*
	 * The bch2_check_dirents pass has already run, dangling dirents
	 * shouldn't exist here:
	 */
	return __lookup_inode(trans, inum, lostfound, &snapshot);

create_lostfound:
	bch2_inode_init_early(c, lostfound);

	ret = bch2_create_trans(trans, root_inum, &root,
				lostfound, &lostfound_str,
				0, 0, S_IFDIR|0700, 0, NULL, NULL,
				(subvol_inum) { }, 0);
	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
		bch_err(c, "error creating lost+found: %s", bch2_err_str(ret));
	return ret;
}
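/* Reattach an unreachable inode by creating a dirent for it in lost+found: */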
static int __reattach_inode(struct btree_trans *trans,
			    struct bch_inode_unpacked *inode,
			    u32 inode_snapshot)
{
	struct bch_hash_info dir_hash;
	struct bch_inode_unpacked lostfound;
	char name_buf[20];
	struct qstr name;
	u64 dir_offset = 0;
	u32 subvol;
	int ret;

	ret = __snapshot_lookup_subvol(trans, inode_snapshot, &subvol);
	if (ret)
		return ret;

	ret = lookup_lostfound(trans, subvol, &lostfound);
	if (ret)
		return ret;

	if (S_ISDIR(inode->bi_mode)) {
		lostfound.bi_nlink++;

		ret = __write_inode(trans, &lostfound, U32_MAX);
		if (ret)
			return ret;
	}

	dir_hash = bch2_hash_info_init(trans->c, &lostfound);

	snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum);
	name = (struct qstr) QSTR(name_buf);

	ret = bch2_dirent_create(trans,
				 (subvol_inum) {
					.subvol = subvol,
					.inum = lostfound.bi_inum,
				 },
				 &dir_hash,
				 inode_d_type(inode),
				 &name, inode->bi_inum, &dir_offset,
				 BCH_HASH_SET_MUST_CREATE);
	if (ret)
		return ret;

	inode->bi_dir		= lostfound.bi_inum;
	inode->bi_dir_offset	= dir_offset;

	return __write_inode(trans, inode, inode_snapshot);
}
static int reattach_inode(struct btree_trans *trans,
			  struct bch_inode_unpacked *inode,
			  u32 inode_snapshot)
{
	int ret = commit_do(trans, NULL, NULL,
			    BTREE_INSERT_LAZY_RW|
			    BTREE_INSERT_NOFAIL,
			    __reattach_inode(trans, inode, inode_snapshot));
	if (ret) {
		bch_err(trans->c, "error reattaching inode %llu: %s",
			inode->bi_inum, bch2_err_str(ret));
		return ret;
	}

	return ret;
}
static int remove_backpointer(struct btree_trans *trans,
			      struct bch_inode_unpacked *inode)
{
	struct btree_iter iter;
	struct bkey_s_c_dirent d;
	int ret;

	d = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_dirents,
				     POS(inode->bi_dir, inode->bi_dir_offset), 0,
				     dirent);
	ret =   bkey_err(d) ?:
		__remove_dirent(trans, d.k->p);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
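/*
 * snapshots_seen remembers which snapshot IDs we've seen keys in at the
 * current position; key_visible_in_snapshot() uses it to decide whether a key
 * in an ancestor snapshot has been overwritten:
 */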
struct snapshots_seen_entry {
	u32				id;
	u32				equiv;
};

struct snapshots_seen {
	struct bpos			pos;
	DARRAY(struct snapshots_seen_entry) ids;
};

static inline void snapshots_seen_exit(struct snapshots_seen *s)
{
	darray_exit(&s->ids);
}

static inline void snapshots_seen_init(struct snapshots_seen *s)
{
	memset(s, 0, sizeof(*s));
}
static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
				 enum btree_id btree_id, struct bpos pos)
{
	struct snapshots_seen_entry *i, n = {
		.id	= pos.snapshot,
		.equiv	= bch2_snapshot_equiv(c, pos.snapshot),
	};
	int ret = 0;

	if (!bkey_eq(s->pos, pos))
		s->ids.nr = 0;

	s->pos = pos;
	s->pos.snapshot = n.equiv;

	darray_for_each(s->ids, i) {
		if (i->id == n.id)
			return 0;

		/*
		 * We currently don't rigorously track for snapshot cleanup
		 * needing to be run, so it shouldn't be a fsck error yet:
		 */
		if (i->equiv == n.equiv) {
			bch_err(c, "snapshot deletion did not finish:\n"
				"  duplicate keys in btree %s at %llu:%llu snapshots %u, %u (equiv %u)\n",
				bch2_btree_ids[btree_id],
				pos.inode, pos.offset,
				i->id, n.id, n.equiv);
			return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_delete_dead_snapshots);
		}
	}

	ret = darray_push(&s->ids, n);
	if (ret)
		bch_err(c, "error reallocating snapshots_seen table (size %zu)",
			s->ids.size);
	return ret;
}
/**
 * key_visible_in_snapshot - returns true if @id is a descendent of @ancestor,
 * and @ancestor hasn't been overwritten in @seen
 *
 * That is, returns whether key in @ancestor snapshot is visible in @id snapshot
 */
static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *seen,
				    u32 id, u32 ancestor)
{
	ssize_t i;

	EBUG_ON(id > ancestor);
	EBUG_ON(!bch2_snapshot_is_equiv(c, id));
	EBUG_ON(!bch2_snapshot_is_equiv(c, ancestor));

	/* @ancestor should be the snapshot most recently added to @seen */
	EBUG_ON(ancestor != seen->pos.snapshot);
	EBUG_ON(ancestor != seen->ids.data[seen->ids.nr - 1].equiv);

	if (id == ancestor)
		return true;

	if (!bch2_snapshot_is_ancestor(c, id, ancestor))
		return false;

	/*
	 * We know that @id is a descendant of @ancestor, we're checking if
	 * we've seen a key that overwrote @ancestor - i.e. also a descendent of
	 * @ancestor and with @id as a descendent.
	 *
	 * But we already know that we're scanning IDs between @id and @ancestor
	 * numerically, since snapshot ID lists are kept sorted, so if we find
	 * an id that's an ancestor of @id we're done:
	 */
	for (i = seen->ids.nr - 2;
	     i >= 0 && seen->ids.data[i].equiv >= id;
	     --i)
		if (bch2_snapshot_is_ancestor(c, id, seen->ids.data[i].equiv))
			return false;

	return true;
}
/**
 * ref_visible - given a key with snapshot id @src that points to a key with
 * snapshot id @dst, test whether there is some snapshot in which @dst is
 * visible.
 *
 * This assumes we're visiting @src keys in natural key order.
 *
 * @s	- list of snapshot IDs already seen at @src
 * @src	- snapshot ID of src key
 * @dst	- snapshot ID of dst key
 */
static int ref_visible(struct bch_fs *c, struct snapshots_seen *s,
		       u32 src, u32 dst)
{
	return dst <= src
		? key_visible_in_snapshot(c, s, dst, src)
		: bch2_snapshot_is_ancestor(c, src, dst);
}

static int ref_visible2(struct bch_fs *c,
			u32 src, struct snapshots_seen *src_seen,
			u32 dst, struct snapshots_seen *dst_seen)
{
	src = bch2_snapshot_equiv(c, src);
	dst = bch2_snapshot_equiv(c, dst);

	if (dst > src) {
		swap(dst, src);
		swap(dst_seen, src_seen);
	}
	return key_visible_in_snapshot(c, src_seen, dst, src);
}
#define for_each_visible_inode(_c, _s, _w, _snapshot, _i)				\
	for (_i = (_w)->inodes.data; _i < (_w)->inodes.data + (_w)->inodes.nr &&	\
	     (_i)->snapshot <= (_snapshot); _i++)					\
		if (key_visible_in_snapshot(_c, _s, _i->snapshot, _snapshot))

struct inode_walker_entry {
	struct bch_inode_unpacked inode;
	u32			snapshot;
	bool			seen_this_pos;
	u64			count;
};

struct inode_walker {
	bool				first_this_inode;
	bool				recalculate_sums;
	struct bpos			last_pos;

	DARRAY(struct inode_walker_entry) inodes;
};
static void inode_walker_exit(struct inode_walker *w)
{
	darray_exit(&w->inodes);
}

static struct inode_walker inode_walker_init(void)
{
	return (struct inode_walker) { 0, };
}

static int add_inode(struct bch_fs *c, struct inode_walker *w,
		     struct bkey_s_c inode)
{
	struct bch_inode_unpacked u;

	BUG_ON(bch2_inode_unpack(inode, &u));

	return darray_push(&w->inodes, ((struct inode_walker_entry) {
		.inode		= u,
		.snapshot	= bch2_snapshot_equiv(c, inode.k->p.snapshot),
	}));
}
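/* Load every snapshot version of inode @inum into the walker: */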
static int get_inodes_all_snapshots(struct btree_trans *trans,
				    struct inode_walker *w, u64 inum)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	u32 restart_count = trans->restart_count;
	int ret;

	w->recalculate_sums = false;
	w->inodes.nr = 0;

	for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, inum),
			   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
		if (k.k->p.offset != inum)
			break;

		if (bkey_is_inode(k.k))
			add_inode(c, w, k);
	}
	bch2_trans_iter_exit(trans, &iter);

	if (ret)
		return ret;

	w->first_this_inode = true;

	if (trans_was_restarted(trans, restart_count))
		return -BCH_ERR_transaction_restart_nested;

	return 0;
}
static struct inode_walker_entry *
lookup_inode_for_snapshot(struct bch_fs *c, struct inode_walker *w,
			  u32 snapshot, bool is_whiteout)
{
	struct inode_walker_entry *i;

	snapshot = bch2_snapshot_equiv(c, snapshot);

	darray_for_each(w->inodes, i)
		if (bch2_snapshot_is_ancestor(c, snapshot, i->snapshot))
			goto found;

	return NULL;
found:
	BUG_ON(snapshot > i->snapshot);

	if (snapshot != i->snapshot && !is_whiteout) {
		struct inode_walker_entry new = *i;
		size_t pos;
		int ret;

		new.snapshot = snapshot;
		new.count = 0;

		bch_info(c, "have key for inode %llu:%u but have inode in ancestor snapshot %u",
			 w->last_pos.inode, snapshot, i->snapshot);

		while (i > w->inodes.data && i[-1].snapshot > snapshot)
			--i;

		pos = i - w->inodes.data;
		ret = darray_insert_item(&w->inodes, pos, new);
		if (ret)
			return ERR_PTR(ret);

		i = w->inodes.data + pos;
	}

	return i;
}
static struct inode_walker_entry *walk_inode(struct btree_trans *trans,
					     struct inode_walker *w, struct bpos pos,
					     bool is_whiteout)
{
	if (w->last_pos.inode != pos.inode) {
		int ret = get_inodes_all_snapshots(trans, w, pos.inode);
		if (ret)
			return ERR_PTR(ret);
	} else if (bkey_cmp(w->last_pos, pos)) {
		struct inode_walker_entry *i;

		darray_for_each(w->inodes, i)
			i->seen_this_pos = false;
	}

	w->last_pos = pos;

	return lookup_inode_for_snapshot(trans->c, w, pos.snapshot, is_whiteout);
}
static int __get_visible_inodes(struct btree_trans *trans,
				struct inode_walker *w,
				struct snapshots_seen *s,
				u64 inum)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	w->inodes.nr = 0;

	for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
				     BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
		u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);

		if (k.k->p.offset != inum)
			break;

		if (!ref_visible(c, s, s->pos.snapshot, equiv))
			continue;

		if (bkey_is_inode(k.k))
			add_inode(c, w, k);

		if (equiv >= s->pos.snapshot)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}
static int check_key_has_snapshot(struct btree_trans *trans,
				  struct btree_iter *iter,
				  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (mustfix_fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
				"key in missing snapshot: %s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
		ret = bch2_btree_delete_at(trans, iter,
					   BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: 1;
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
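/* Delete a mis-hashed key and re-add it so it lands at the offset its hash dictates: */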
static int hash_redo_key(struct btree_trans *trans,
			 const struct bch_hash_desc desc,
			 struct bch_hash_info *hash_info,
			 struct btree_iter *k_iter, struct bkey_s_c k)
{
	struct bkey_i *delete;
	struct bkey_i *tmp;

	delete = bch2_trans_kmalloc(trans, sizeof(*delete));
	if (IS_ERR(delete))
		return PTR_ERR(delete);

	tmp = bch2_bkey_make_mut_noupdate(trans, k);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	bkey_init(&delete->k);
	delete->k.p = k_iter->pos;
	return  bch2_btree_iter_traverse(k_iter) ?:
		bch2_trans_update(trans, k_iter, delete, 0) ?:
		bch2_hash_set_snapshot(trans, desc, hash_info,
				       (subvol_inum) { 0, k.k->p.inode },
				       k.k->p.snapshot, tmp,
				       BCH_HASH_SET_MUST_CREATE,
				       BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BTREE_INSERT_NOFAIL|
				  BTREE_INSERT_LAZY_RW);
}
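/*
 * Check that a hashed key (dirent/xattr) is found where its hash says it
 * should be, detecting duplicates along the way; repair with hash_redo_key()
 * if not:
 */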
static int hash_check_key(struct btree_trans *trans,
			  const struct bch_hash_desc desc,
			  struct bch_hash_info *hash_info,
			  struct btree_iter *k_iter, struct bkey_s_c hash_k)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter = { NULL };
	struct printbuf buf = PRINTBUF;
	struct bkey_s_c k;
	u64 hash;
	int ret = 0;

	if (hash_k.k->type != desc.key_type)
		return 0;

	hash = desc.hash_bkey(hash_info, hash_k);

	if (likely(hash == hash_k.k->p.offset))
		return 0;

	if (hash_k.k->p.offset < hash)
		goto bad_hash;

	for_each_btree_key_norestart(trans, iter, desc.btree_id,
				     SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot),
				     BTREE_ITER_SLOTS, k, ret) {
		if (bkey_eq(k.k->p, hash_k.k->p))
			break;

		if (fsck_err_on(k.k->type == desc.key_type &&
				!desc.cmp_bkey(k, hash_k), c,
				"duplicate hash table keys:\n%s",
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, hash_k),
				 buf.buf))) {
			ret = bch2_hash_delete_at(trans, desc, hash_info, k_iter, 0) ?: 1;
			break;
		}

		if (bkey_deleted(k.k)) {
			bch2_trans_iter_exit(trans, &iter);
			goto bad_hash;
		}
	}
out:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
bad_hash:
	if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
		     bch2_btree_ids[desc.btree_id], hash_k.k->p.inode, hash_k.k->p.offset, hash,
		     (printbuf_reset(&buf),
		      bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
		ret = hash_redo_key(trans, desc, hash_info, k_iter, hash_k);
		if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
			bch_err(c, "hash_redo_key err %s", bch2_err_str(ret));
		if (ret)
			return ret;
		ret = -BCH_ERR_transaction_restart_nested;
	}
fsck_err:
	goto out;
}
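/*
 * Check a single inode: delete unlinked inodes, finish truncates and i_sectors
 * recounts left over from an unclean shutdown, and clear untrusted
 * backpointers:
 */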
static int check_inode(struct btree_trans *trans,
		       struct btree_iter *iter,
		       struct bkey_s_c k,
		       struct bch_inode_unpacked *prev,
		       struct snapshots_seen *s,
		       bool full)
{
	struct bch_fs *c = trans->c;
	struct bch_inode_unpacked u;
	bool do_update = false;
	int ret;

	ret = check_key_has_snapshot(trans, iter, k);
	if (ret < 0)
		goto err;
	if (ret)
		return 0;

	ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
	if (ret)
		goto err;

	/*
	 * if snapshot id isn't a leaf node, skip it - deletion in
	 * particular is not atomic, so on the internal snapshot nodes
	 * we can see inodes marked for deletion after a clean shutdown
	 */
	if (bch2_snapshot_is_internal_node(c, k.k->p.snapshot))
		return 0;

	if (!bkey_is_inode(k.k))
		return 0;

	BUG_ON(bch2_inode_unpack(k, &u));

	if (!full &&
	    !(u.bi_flags & (BCH_INODE_I_SIZE_DIRTY|
			    BCH_INODE_I_SECTORS_DIRTY|
			    BCH_INODE_UNLINKED)))
		return 0;

	if (prev->bi_inum != u.bi_inum)
		*prev = u;

	if (fsck_err_on(prev->bi_hash_seed	!= u.bi_hash_seed ||
			inode_d_type(prev)	!= inode_d_type(&u), c,
			"inodes in different snapshots don't match")) {
		bch_err(c, "repair not implemented yet");
		return -EINVAL;
	}

	if (u.bi_flags & BCH_INODE_UNLINKED &&
	    (!c->sb.clean ||
	     fsck_err(c, "filesystem marked clean, but inode %llu unlinked",
		      u.bi_inum))) {
		bch2_trans_unlock(trans);
		bch2_fs_lazy_rw(c);

		ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot);
		if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
			bch_err(c, "error in fsck: error while deleting inode: %s",
				bch2_err_str(ret));
		return ret;
	}

	if (u.bi_flags & BCH_INODE_I_SIZE_DIRTY &&
	    (!c->sb.clean ||
	     fsck_err(c, "filesystem marked clean, but inode %llu has i_size dirty",
		      u.bi_inum))) {
		bch_verbose(c, "truncating inode %llu", u.bi_inum);

		bch2_trans_unlock(trans);
		bch2_fs_lazy_rw(c);

		/*
		 * XXX: need to truncate partial blocks too here - or ideally
		 * just switch units to bytes and that issue goes away
		 */
		ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
				SPOS(u.bi_inum, round_up(u.bi_size, block_bytes(c)) >> 9,
				     iter->pos.snapshot),
				POS(u.bi_inum, U64_MAX),
				0, NULL);
		if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
			bch_err(c, "error in fsck: error truncating inode: %s",
				bch2_err_str(ret));
		if (ret)
			return ret;

		/*
		 * We truncated without our normal sector accounting hook, just
		 * make sure we recalculate it:
		 */
		u.bi_flags |= BCH_INODE_I_SECTORS_DIRTY;

		u.bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
		do_update = true;
	}

	if (u.bi_flags & BCH_INODE_I_SECTORS_DIRTY &&
	    (!c->sb.clean ||
	     fsck_err(c, "filesystem marked clean, but inode %llu has i_sectors dirty",
		      u.bi_inum))) {
		s64 sectors;

		bch_verbose(c, "recounting sectors for inode %llu",
			    u.bi_inum);

		sectors = bch2_count_inode_sectors(trans, u.bi_inum, iter->pos.snapshot);
		if (sectors < 0) {
			bch_err(c, "error in fsck: error recounting inode sectors: %s",
				bch2_err_str(sectors));
			return sectors;
		}

		u.bi_sectors = sectors;
		u.bi_flags &= ~BCH_INODE_I_SECTORS_DIRTY;
		do_update = true;
	}

	if (u.bi_flags & BCH_INODE_BACKPTR_UNTRUSTED) {
		u.bi_dir = 0;
		u.bi_dir_offset = 0;
		u.bi_flags &= ~BCH_INODE_BACKPTR_UNTRUSTED;
		do_update = true;
	}

	if (do_update) {
		ret = __write_inode(trans, &u, iter->pos.snapshot);
		if (ret)
			bch_err(c, "error in fsck: error updating inode: %s",
				bch2_err_str(ret));
	}
err:
fsck_err:
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
noinline_for_stack
int bch2_check_inodes(struct bch_fs *c)
{
	bool full = c->opts.fsck;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bch_inode_unpacked prev = { 0 };
	struct snapshots_seen s;
	struct bkey_s_c k;
	int ret;

	snapshots_seen_init(&s);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);

	ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_inodes,
			POS_MIN,
			BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
			NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
		check_inode(&trans, &iter, k, &prev, &s, full));

	bch2_trans_exit(&trans);
	snapshots_seen_exit(&s);
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
static struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans,
						struct btree_iter *iter,
						struct bpos pos)
{
	return bch2_bkey_get_iter_typed(trans, iter, BTREE_ID_dirents, pos, 0, dirent);
}

static bool inode_points_to_dirent(struct bch_inode_unpacked *inode,
				   struct bkey_s_c_dirent d)
{
	return  inode->bi_dir		== d.k->p.inode &&
		inode->bi_dir_offset	== d.k->p.offset;
}

static bool dirent_points_to_inode(struct bkey_s_c_dirent d,
				   struct bch_inode_unpacked *inode)
{
	return d.v->d_type == DT_SUBVOL
		? le32_to_cpu(d.v->d_child_subvol)	== inode->bi_subvol
		: le64_to_cpu(d.v->d_inum)		== inode->bi_inum;
}
static int inode_backpointer_exists(struct btree_trans *trans,
				    struct bch_inode_unpacked *inode,
				    u32 snapshot)
{
	struct btree_iter iter;
	struct bkey_s_c_dirent d;
	int ret;

	d = dirent_get_by_pos(trans, &iter,
			SPOS(inode->bi_dir, inode->bi_dir_offset, snapshot));
	ret = bkey_err(d);
	if (ret)
		return bch2_err_matches(ret, ENOENT) ? 0 : ret;

	ret = dirent_points_to_inode(d, inode);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
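/* Compare the i_sectors we counted from extents against each inode version: */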
static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
{
	struct bch_fs *c = trans->c;
	struct inode_walker_entry *i;
	u32 restart_count = trans->restart_count;
	int ret = 0;
	s64 count2;

	darray_for_each(w->inodes, i) {
		if (i->inode.bi_sectors == i->count)
			continue;

		count2 = bch2_count_inode_sectors(trans, w->last_pos.inode, i->snapshot);

		if (w->recalculate_sums)
			i->count = count2;

		if (i->count != count2) {
			bch_err(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu",
				w->last_pos.inode, i->snapshot, i->count, count2);
			return -BCH_ERR_internal_fsck_err;
		}

		if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY), c,
				"inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
				w->last_pos.inode, i->snapshot,
				i->inode.bi_sectors, i->count)) {
			i->inode.bi_sectors = i->count;
			ret = write_inode(trans, &i->inode, i->snapshot);
			if (ret)
				break;
		}
	}
fsck_err:
	if (ret)
		bch_err_fn(c, ret);
	if (!ret && trans_was_restarted(trans, restart_count))
		ret = -BCH_ERR_transaction_restart_nested;
	return ret;
}
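/*
 * extent_ends tracks, per snapshot, where the previous extent for the current
 * inode ended - used for detecting overlapping extents:
 */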
struct extent_end {
	u32			snapshot;
	u64			offset;
	struct snapshots_seen	seen;
};

struct extent_ends {
	struct bpos			last_pos;
	DARRAY(struct extent_end)	e;
};

static void extent_ends_reset(struct extent_ends *extent_ends)
{
	struct extent_end *i;

	darray_for_each(extent_ends->e, i)
		snapshots_seen_exit(&i->seen);

	extent_ends->e.nr = 0;
}

static void extent_ends_exit(struct extent_ends *extent_ends)
{
	extent_ends_reset(extent_ends);
	darray_exit(&extent_ends->e);
}

static void extent_ends_init(struct extent_ends *extent_ends)
{
	memset(extent_ends, 0, sizeof(*extent_ends));
}

static int extent_ends_at(struct bch_fs *c,
			  struct extent_ends *extent_ends,
			  struct snapshots_seen *seen,
			  struct bkey_s_c k)
{
	struct extent_end *i, n = (struct extent_end) {
		.offset		= k.k->p.offset,
		.snapshot	= k.k->p.snapshot,
		.seen		= *seen,
	};

	n.seen.ids.data = kmemdup(seen->ids.data,
				  sizeof(seen->ids.data[0]) * seen->ids.size,
				  GFP_KERNEL);
	if (!n.seen.ids.data)
		return -BCH_ERR_ENOMEM_fsck_extent_ends_at;

	darray_for_each(extent_ends->e, i) {
		if (i->snapshot == k.k->p.snapshot) {
			snapshots_seen_exit(&i->seen);
			*i = n;
			return 0;
		}

		if (i->snapshot >= k.k->p.snapshot)
			break;
	}

	return darray_insert_item(&extent_ends->e, i - extent_ends->e.data, n);
}
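/*
 * Two extents for the same inode overlap: print both keys and, if the repair
 * is accepted, flag the caller so i_sectors gets recalculated:
 */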
static int overlapping_extents_found(struct btree_trans *trans,
				     enum btree_id btree,
				     struct bpos pos1, struct bkey pos2,
				     bool *fixed)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	struct btree_iter iter;
	struct bkey_s_c k;
	u32 snapshot = min(pos1.snapshot, pos2.p.snapshot);
	int ret;

	BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2)));

	bch2_trans_iter_init(trans, &iter, btree, SPOS(pos1.inode, pos1.offset - 1, snapshot), 0);
	k = bch2_btree_iter_peek_upto(&iter, POS(pos1.inode, U64_MAX));
	ret = bkey_err(k);
	if (ret)
		goto err;

	prt_str(&buf, "\n  ");
	bch2_bkey_val_to_text(&buf, c, k);

	if (!bpos_eq(pos1, k.k->p)) {
		bch_err(c, "%s: error finding first overlapping extent when repairing%s",
			__func__, buf.buf);
		ret = -BCH_ERR_internal_fsck_err;
		goto err;
	}

	while (1) {
		bch2_btree_iter_advance(&iter);

		k = bch2_btree_iter_peek_upto(&iter, POS(pos1.inode, U64_MAX));
		ret = bkey_err(k);
		if (ret)
			goto err;

		if (bkey_ge(k.k->p, pos2.p))
			break;
	}

	prt_str(&buf, "\n  ");
	bch2_bkey_val_to_text(&buf, c, k);

	if (bkey_gt(k.k->p, pos2.p) ||
	    pos2.size != k.k->size) {
		bch_err(c, "%s: error finding second overlapping extent when repairing%s",
			__func__, buf.buf);
		ret = -BCH_ERR_internal_fsck_err;
		goto err;
	}

	if (fsck_err(c, "overlapping extents%s", buf.buf)) {
		struct bpos update_pos = pos1.snapshot < pos2.p.snapshot ? pos1 : pos2.p;
		struct btree_iter update_iter;

		struct bkey_i *update = bch2_bkey_get_mut(trans, &update_iter,
							  btree, update_pos,
							  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
		bch2_trans_iter_exit(trans, &update_iter);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			goto err;

		*fixed = true;
	}
fsck_err:
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}
static int check_overlapping_extents(struct btree_trans *trans,
				     struct snapshots_seen *seen,
				     struct extent_ends *extent_ends,
				     struct bkey_s_c k,
				     u32 equiv,
				     struct btree_iter *iter)
{
	struct bch_fs *c = trans->c;
	struct extent_end *i;
	bool fixed = false;
	int ret = 0;

	/* transaction restart, running again */
	if (bpos_eq(extent_ends->last_pos, k.k->p))
		return 0;

	if (extent_ends->last_pos.inode != k.k->p.inode)
		extent_ends_reset(extent_ends);

	darray_for_each(extent_ends->e, i) {
		if (i->offset <= bkey_start_offset(k.k))
			continue;

		if (!ref_visible2(c,
				  k.k->p.snapshot, seen,
				  i->snapshot, &i->seen))
			continue;

		ret = overlapping_extents_found(trans, iter->btree_id,
						SPOS(iter->pos.inode,
						     i->offset,
						     i->snapshot),
						*k.k, &fixed);
		if (ret)
			goto err;
	}

	ret = extent_ends_at(c, extent_ends, seen, k);
	if (ret)
		goto err;

	extent_ends->last_pos = k.k->p;
err:
	return ret ?: fixed;
}
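/*
 * Check one extent key: it must belong to a regular file or symlink that
 * exists in its snapshot, must not extend past i_size, must not overlap other
 * extents, and its size is accumulated into the per-snapshot i_sectors count:
 */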
static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
			struct bkey_s_c k,
			struct inode_walker *inode,
			struct snapshots_seen *s,
			struct extent_ends *extent_ends)
{
	struct bch_fs *c = trans->c;
	struct inode_walker_entry *i;
	struct printbuf buf = PRINTBUF;
	struct bpos equiv = k.k->p;
	int ret = 0;

	equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);

	ret = check_key_has_snapshot(trans, iter, k);
	if (ret) {
		ret = ret < 0 ? ret : 0;
		goto out;
	}

	if (inode->last_pos.inode != k.k->p.inode) {
		ret = check_i_sectors(trans, inode);
		if (ret)
			goto err;
	}

	i = walk_inode(trans, inode, equiv, k.k->type == KEY_TYPE_whiteout);
	ret = PTR_ERR_OR_ZERO(i);
	if (ret)
		goto err;

	ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
	if (ret)
		goto err;

	if (k.k->type != KEY_TYPE_whiteout) {
		if (fsck_err_on(!i, c,
				"extent in missing inode:\n  %s",
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			goto delete;

		if (fsck_err_on(i &&
				!S_ISREG(i->inode.bi_mode) &&
				!S_ISLNK(i->inode.bi_mode), c,
				"extent in non regular inode mode %o:\n  %s",
				i->inode.bi_mode,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			goto delete;

		ret = check_overlapping_extents(trans, s, extent_ends, k,
						equiv.snapshot, iter);
		if (ret < 0)
			goto err;

		if (ret)
			inode->recalculate_sums = true;
		ret = 0;
	}

	/*
	 * Check inodes in reverse order, from oldest snapshots to newest,
	 * starting from the inode that matches this extent's snapshot. If we
	 * didn't have one, iterate over all inodes:
	 */
	if (!i)
		i = inode->inodes.data + inode->inodes.nr - 1;

	for (;
	     inode->inodes.data && i >= inode->inodes.data;
	     --i) {
		if (i->snapshot > equiv.snapshot ||
		    !key_visible_in_snapshot(c, s, i->snapshot, equiv.snapshot))
			continue;

		if (k.k->type != KEY_TYPE_whiteout) {
			if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
					k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
					!bkey_extent_is_reservation(k), c,
					"extent type past end of inode %llu:%u, i_size %llu\n  %s",
					i->inode.bi_inum, i->snapshot, i->inode.bi_size,
					(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
				struct btree_iter iter2;

				bch2_trans_copy_iter(&iter2, iter);
				bch2_btree_iter_set_snapshot(&iter2, i->snapshot);
				ret =   bch2_btree_iter_traverse(&iter2) ?:
					bch2_btree_delete_at(trans, &iter2,
						BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
				bch2_trans_iter_exit(trans, &iter2);
				if (ret)
					goto err;

				iter->k.type = KEY_TYPE_whiteout;
			}

			if (bkey_extent_is_allocation(k.k))
				i->count += k.k->size;
		}

		i->seen_this_pos = true;
	}
out:
err:
fsck_err:
	printbuf_exit(&buf);

	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
		bch_err_fn(c, ret);
	return ret;
delete:
	ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
	goto out;
}
/*
 * Walk extents: verify that extents have a corresponding S_ISREG inode, and
 * that i_size and i_sectors are consistent
 */
int bch2_check_extents(struct bch_fs *c)
{
	struct inode_walker w = inode_walker_init();
	struct snapshots_seen s;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct extent_ends extent_ends;
	struct disk_reservation res = { 0 };
	int ret = 0;

	snapshots_seen_init(&s);
	extent_ends_init(&extent_ends);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);

	ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_extents,
			POS(BCACHEFS_ROOT_INO, 0),
			BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
			&res, NULL,
			BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, ({
		bch2_disk_reservation_put(c, &res);
		check_extent(&trans, &iter, k, &w, &s, &extent_ends);
	})) ?:
	check_i_sectors(&trans, &w);

	bch2_disk_reservation_put(c, &res);
	extent_ends_exit(&extent_ends);
	inode_walker_exit(&w);
	bch2_trans_exit(&trans);
	snapshots_seen_exit(&s);

	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
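/* Compare the subdirectory counts we walked against each directory's i_nlink: */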
static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
{
	struct bch_fs *c = trans->c;
	struct inode_walker_entry *i;
	u32 restart_count = trans->restart_count;
	int ret = 0;
	s64 count2;

	darray_for_each(w->inodes, i) {
		if (i->inode.bi_nlink == i->count)
			continue;

		count2 = bch2_count_subdirs(trans, w->last_pos.inode, i->snapshot);
		if (count2 < 0)
			return count2;

		if (i->count != count2) {
			bch_err(c, "fsck counted subdirectories wrong: got %llu should be %llu",
				i->count, count2);
			i->count = count2;
			if (i->inode.bi_nlink == i->count)
				continue;
		}

		if (fsck_err_on(i->inode.bi_nlink != i->count, c,
				"directory %llu:%u with wrong i_nlink: got %u, should be %llu",
				w->last_pos.inode, i->snapshot, i->inode.bi_nlink, i->count)) {
			i->inode.bi_nlink = i->count;
			ret = write_inode(trans, &i->inode, i->snapshot);
			if (ret)
				break;
		}
	}
fsck_err:
	if (ret)
		bch_err_fn(c, ret);
	if (!ret && trans_was_restarted(trans, restart_count))
		ret = -BCH_ERR_transaction_restart_nested;
	return ret;
}
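/*
 * Check that @target's backpointer points at dirent @d and that the dirent's
 * d_type matches the target inode:
 */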
static int check_dirent_target(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c_dirent d,
			       struct bch_inode_unpacked *target,
			       u32 target_snapshot)
{
	struct bch_fs *c = trans->c;
	struct bkey_i_dirent *n;
	bool backpointer_exists = true;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (!target->bi_dir &&
	    !target->bi_dir_offset) {
		target->bi_dir		= d.k->p.inode;
		target->bi_dir_offset	= d.k->p.offset;

		ret = __write_inode(trans, target, target_snapshot);
		if (ret)
			goto err;
	}

	if (!inode_points_to_dirent(target, d)) {
		ret = inode_backpointer_exists(trans, target, d.k->p.snapshot);
		if (ret < 0)
			goto err;

		backpointer_exists = ret;
		ret = 0;

		if (fsck_err_on(S_ISDIR(target->bi_mode) &&
				backpointer_exists, c,
				"directory %llu with multiple links",
				target->bi_inum)) {
			ret = __remove_dirent(trans, d.k->p);
			goto out;
		}

		if (fsck_err_on(backpointer_exists &&
				!target->bi_nlink, c,
				"inode %llu type %s has multiple links but i_nlink 0",
				target->bi_inum, bch2_d_types[d.v->d_type])) {
			target->bi_nlink++;
			target->bi_flags &= ~BCH_INODE_UNLINKED;

			ret = __write_inode(trans, target, target_snapshot);
			if (ret)
				goto err;
		}

		if (fsck_err_on(!backpointer_exists, c,
				"inode %llu:%u has wrong backpointer:\n"
				"got %llu:%llu\n"
				"should be %llu:%llu",
				target->bi_inum, target_snapshot,
				target->bi_dir,
				target->bi_dir_offset,
				d.k->p.inode,
				d.k->p.offset)) {
			target->bi_dir		= d.k->p.inode;
			target->bi_dir_offset	= d.k->p.offset;

			ret = __write_inode(trans, target, target_snapshot);
			if (ret)
				goto err;
		}
	}

	if (fsck_err_on(d.v->d_type != inode_d_type(target), c,
			"incorrect d_type: got %s, should be %s:\n%s",
			bch2_d_type_str(d.v->d_type),
			bch2_d_type_str(inode_d_type(target)),
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
		n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
		ret = PTR_ERR_OR_ZERO(n);
		if (ret)
			goto err;

		bkey_reassemble(&n->k_i, d.s_c);
		n->v.d_type = inode_d_type(target);

		ret = bch2_trans_update(trans, iter, &n->k_i, 0);
		if (ret)
			goto err;

		d = dirent_i_to_s_c(n);
	}

	if (d.v->d_type == DT_SUBVOL &&
	    target->bi_parent_subvol != le32_to_cpu(d.v->d_parent_subvol) &&
	    (c->sb.version < bcachefs_metadata_version_subvol_dirent ||
	     fsck_err(c, "dirent has wrong d_parent_subvol field: got %u, should be %u",
		      le32_to_cpu(d.v->d_parent_subvol),
		      target->bi_parent_subvol))) {
		n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
		ret = PTR_ERR_OR_ZERO(n);
		if (ret)
			goto err;

		bkey_reassemble(&n->k_i, d.s_c);
		n->v.d_parent_subvol = cpu_to_le32(target->bi_parent_subvol);

		ret = bch2_trans_update(trans, iter, &n->k_i, 0);
		if (ret)
			goto err;

		d = dirent_i_to_s_c(n);
	}
out:
err:
fsck_err:
	printbuf_exit(&buf);

	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
		bch_err_fn(c, ret);
	return ret;
}
2021-07-14 20:28:27 -04:00
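/*
 * Check a single dirent key: verify its snapshot and hash, that it lives in
 * an existing directory inode, and that the inode (or subvolume) it points at
 * exists; then run check_dirent_target() against every visible target inode.
 */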
static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
			struct bkey_s_c k,
			struct bch_hash_info *hash_info,
			struct inode_walker *dir,
			struct inode_walker *target,
			struct snapshots_seen *s)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c_dirent d;
	struct inode_walker_entry *i;
	struct printbuf buf = PRINTBUF;
	struct bpos equiv;
	int ret = 0;

	ret = check_key_has_snapshot(trans, iter, k);
	if (ret) {
		ret = ret < 0 ? ret : 0;
		goto out;
	}

	equiv = k.k->p;
	equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);

	ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
	if (ret)
		goto err;

	if (k.k->type == KEY_TYPE_whiteout)
		goto out;

	if (dir->last_pos.inode != k.k->p.inode) {
		ret = check_subdir_count(trans, dir);
		if (ret)
			goto err;
	}

	BUG_ON(!iter->path->should_be_locked);

	i = walk_inode(trans, dir, equiv, k.k->type == KEY_TYPE_whiteout);
	ret = PTR_ERR_OR_ZERO(i);
	if (ret < 0)
		goto err;

	if (dir->first_this_inode && dir->inodes.nr)
		*hash_info = bch2_hash_info_init(c, &dir->inodes.data[0].inode);
	dir->first_this_inode = false;

	if (fsck_err_on(!i, c,
			"dirent in nonexisting directory:\n%s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter,
					   BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
		goto out;
	}

	if (!i)
		goto out;

	if (fsck_err_on(!S_ISDIR(i->inode.bi_mode), c,
			"dirent in non directory inode type %s:\n%s",
			bch2_d_type_str(inode_d_type(&i->inode)),
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	ret = hash_check_key(trans, bch2_dirent_hash_desc, hash_info, iter, k);
	if (ret < 0)
		goto err;
	if (ret) {
		/* dirent has been deleted */
		ret = 0;
		goto out;
	}

	if (k.k->type != KEY_TYPE_dirent)
		goto out;

	d = bkey_s_c_to_dirent(k);

	if (d.v->d_type == DT_SUBVOL) {
		struct bch_inode_unpacked subvol_root;
		u32 target_subvol = le32_to_cpu(d.v->d_child_subvol);
		u32 target_snapshot;
		u64 target_inum;

		ret = __subvol_lookup(trans, target_subvol,
				      &target_snapshot, &target_inum);
		if (ret && !bch2_err_matches(ret, ENOENT))
			goto err;

		if (fsck_err_on(ret, c,
				"dirent points to missing subvolume %u",
				le32_to_cpu(d.v->d_child_subvol))) {
			ret = __remove_dirent(trans, d.k->p);
			goto err;
		}

		ret = __lookup_inode(trans, target_inum,
				     &subvol_root, &target_snapshot);
		if (ret && !bch2_err_matches(ret, ENOENT))
			goto err;

		if (fsck_err_on(ret, c,
				"subvolume %u points to missing subvolume root %llu",
				target_subvol,
				target_inum)) {
			bch_err(c, "repair not implemented yet");
			ret = -EINVAL;
			goto err;
		}

		if (fsck_err_on(subvol_root.bi_subvol != target_subvol, c,
				"subvol root %llu has wrong bi_subvol field: got %u, should be %u",
				target_inum,
				subvol_root.bi_subvol, target_subvol)) {
			subvol_root.bi_subvol = target_subvol;

			ret = __write_inode(trans, &subvol_root, target_snapshot);
			if (ret)
				goto err;
		}

		ret = check_dirent_target(trans, iter, d, &subvol_root,
					  target_snapshot);
		if (ret)
			goto err;
	} else {
		ret = __get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum));
		if (ret)
			goto err;

		if (fsck_err_on(!target->inodes.nr, c,
				"dirent points to missing inode: (equiv %u)\n%s",
				equiv.snapshot,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k),
				 buf.buf))) {
			ret = __remove_dirent(trans, d.k->p);
			if (ret)
				goto err;
		}

		darray_for_each(target->inodes, i) {
			ret = check_dirent_target(trans, iter, d,
						  &i->inode, i->snapshot);
			if (ret)
				goto err;
		}
	}

	if (d.v->d_type == DT_DIR)
		for_each_visible_inode(c, s, dir, equiv.snapshot, i)
			i->count++;

out:
err:
fsck_err:
	printbuf_exit(&buf);

	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
		bch_err_fn(c, ret);
	return ret;
}
/*
 * Walk dirents: verify that they all have a corresponding S_ISDIR inode,
 * validate d_type
 */
int bch2_check_dirents(struct bch_fs *c)
{
	struct inode_walker dir = inode_walker_init();
	struct inode_walker target = inode_walker_init();
	struct snapshots_seen s;
	struct bch_hash_info hash_info;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	snapshots_seen_init(&s);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);

	ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_dirents,
			POS(BCACHEFS_ROOT_INO, 0),
			BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
			k,
			NULL, NULL,
			BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
		check_dirent(&trans, &iter, k, &hash_info, &dir, &target, &s));

	bch2_trans_exit(&trans);
	snapshots_seen_exit(&s);
	inode_walker_exit(&dir);
	inode_walker_exit(&target);

	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
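/*
 * Check a single xattr key: verify its snapshot and hash, and that the inode
 * it belongs to exists; xattrs for missing inodes are deleted.
 */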
static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
		       struct bkey_s_c k,
		       struct bch_hash_info *hash_info,
		       struct inode_walker *inode)
{
	struct bch_fs *c = trans->c;
	struct inode_walker_entry *i;
	int ret;

	ret = check_key_has_snapshot(trans, iter, k);
	if (ret)
		return ret;

	i = walk_inode(trans, inode, k.k->p, k.k->type == KEY_TYPE_whiteout);
	ret = PTR_ERR_OR_ZERO(i);
	if (ret)
		return ret;

	if (inode->first_this_inode && inode->inodes.nr)
		*hash_info = bch2_hash_info_init(c, &inode->inodes.data[0].inode);
	inode->first_this_inode = false;

	if (fsck_err_on(!i, c,
			"xattr for missing inode %llu",
			k.k->p.inode))
		return bch2_btree_delete_at(trans, iter, 0);

	if (!i)
		return 0;

	ret = hash_check_key(trans, bch2_xattr_hash_desc, hash_info, iter, k);
fsck_err:
	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
		bch_err_fn(c, ret);
	return ret;
}
/*
 * Walk xattrs: verify that they all have a corresponding inode
 */
int bch2_check_xattrs(struct bch_fs *c)
{
	struct inode_walker inode = inode_walker_init();
	struct bch_hash_info hash_info;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);

	ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs,
			POS(BCACHEFS_ROOT_INO, 0),
			BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
			k,
			NULL, NULL,
			BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
		check_xattr(&trans, &iter, k, &hash_info, &inode));

	bch2_trans_exit(&trans);

	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
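/*
 * Check that the root subvolume and root directory inode exist, recreating
 * them with default values if they are missing or malformed.
 */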
static int check_root_trans(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct bch_inode_unpacked root_inode;
	u32 snapshot;
	u64 inum;
	int ret;

	ret = __subvol_lookup(trans, BCACHEFS_ROOT_SUBVOL, &snapshot, &inum);
	if (ret && !bch2_err_matches(ret, ENOENT))
		return ret;

	if (mustfix_fsck_err_on(ret, c, "root subvol missing")) {
		struct bkey_i_subvolume root_subvol;

		snapshot	= U32_MAX;
		inum		= BCACHEFS_ROOT_INO;

		bkey_subvolume_init(&root_subvol.k_i);
		root_subvol.k.p.offset	= BCACHEFS_ROOT_SUBVOL;
		root_subvol.v.flags	= 0;
		root_subvol.v.snapshot	= cpu_to_le32(snapshot);
		root_subvol.v.inode	= cpu_to_le64(inum);

		ret = commit_do(trans, NULL, NULL,
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_LAZY_RW,
			__bch2_btree_insert(trans, BTREE_ID_subvolumes,
					    &root_subvol.k_i, 0));
		if (ret) {
			bch_err(c, "error writing root subvol: %s", bch2_err_str(ret));
			goto err;
		}
	}

	ret = __lookup_inode(trans, BCACHEFS_ROOT_INO, &root_inode, &snapshot);
	if (ret && !bch2_err_matches(ret, ENOENT))
		return ret;

	if (mustfix_fsck_err_on(ret, c, "root directory missing") ||
	    mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode), c,
				"root inode not a directory")) {
		bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755,
				0, NULL);
		root_inode.bi_inum = inum;

		ret = __write_inode(trans, &root_inode, snapshot);
		if (ret)
			bch_err(c, "error writing root inode: %s", bch2_err_str(ret));
	}
err:
fsck_err:
	return ret;
}
/* Get root directory, create if it doesn't exist: */
int bch2_check_root(struct bch_fs *c)
{
	int ret;

	ret = bch2_trans_do(c, NULL, NULL,
			    BTREE_INSERT_NOFAIL|
			    BTREE_INSERT_LAZY_RW,
		check_root_trans(&trans));

	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
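/*
 * pathbuf tracks the chain of (inode, snapshot) pairs walked from an inode
 * towards the root, so that loops in the directory structure can be detected:
 */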
struct pathbuf_entry {
	u64	inum;
	u32	snapshot;
};

typedef DARRAY(struct pathbuf_entry) pathbuf;

static bool path_is_dup(pathbuf *p, u64 inum, u32 snapshot)
{
	struct pathbuf_entry *i;

	darray_for_each(*p, i)
		if (i->inum	== inum &&
		    i->snapshot	== snapshot)
			return true;

	return false;
}
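/* Append an (inode, snapshot) pair to the path being walked: */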
static int path_down(struct bch_fs *c, pathbuf *p,
		     u64 inum, u32 snapshot)
{
	int ret = darray_push(p, ((struct pathbuf_entry) {
		.inum		= inum,
		.snapshot	= snapshot,
	}));

	if (ret)
		bch_err(c, "fsck: error allocating memory for pathbuf, size %zu",
			p->size);
	return ret;
}
/*
 * Check that a given inode is reachable from the root:
 *
 * XXX: we should also be verifying that inodes are in the right subvolumes
 */
static int check_path(struct btree_trans *trans,
		      pathbuf *p,
		      struct bch_inode_unpacked *inode,
		      u32 snapshot)
{
	struct bch_fs *c = trans->c;
	int ret = 0;

	snapshot = bch2_snapshot_equiv(c, snapshot);
	p->nr = 0;

	while (!(inode->bi_inum == BCACHEFS_ROOT_INO &&
		 inode->bi_subvol == BCACHEFS_ROOT_SUBVOL)) {
		struct btree_iter dirent_iter;
		struct bkey_s_c_dirent d;
		u32 parent_snapshot = snapshot;

		if (inode->bi_subvol) {
			u64 inum;

			ret = subvol_lookup(trans, inode->bi_parent_subvol,
					    &parent_snapshot, &inum);
			if (ret)
				break;
		}

		ret = lockrestart_do(trans,
			PTR_ERR_OR_ZERO((d = dirent_get_by_pos(trans, &dirent_iter,
					  SPOS(inode->bi_dir, inode->bi_dir_offset,
					       parent_snapshot))).k));
		if (ret && !bch2_err_matches(ret, ENOENT))
			break;

		if (!ret && !dirent_points_to_inode(d, inode)) {
			bch2_trans_iter_exit(trans, &dirent_iter);
			ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
		}

		if (bch2_err_matches(ret, ENOENT)) {
			if (fsck_err(c, "unreachable inode %llu:%u, type %s nlink %u backptr %llu:%llu",
				     inode->bi_inum, snapshot,
				     bch2_d_type_str(inode_d_type(inode)),
				     inode->bi_nlink,
				     inode->bi_dir,
				     inode->bi_dir_offset))
				ret = reattach_inode(trans, inode, snapshot);
			break;
		}

		bch2_trans_iter_exit(trans, &dirent_iter);

		if (!S_ISDIR(inode->bi_mode))
			break;

		ret = path_down(c, p, inode->bi_inum, snapshot);
		if (ret) {
			bch_err(c, "memory allocation failure");
			return ret;
		}

		snapshot = parent_snapshot;

		ret = lookup_inode(trans, inode->bi_dir, inode, &snapshot);
		if (ret) {
			/* Should have been caught in dirents pass */
			bch_err(c, "error looking up parent directory: %i", ret);
			break;
		}

		if (path_is_dup(p, inode->bi_inum, snapshot)) {
			struct pathbuf_entry *i;

			/* XXX print path */
			bch_err(c, "directory structure loop");

			darray_for_each(*p, i)
				pr_err("%llu:%u", i->inum, i->snapshot);
			pr_err("%llu:%u", inode->bi_inum, snapshot);

			if (!fsck_err(c, "directory structure loop"))
				return 0;

			ret = commit_do(trans, NULL, NULL,
					BTREE_INSERT_NOFAIL|
					BTREE_INSERT_LAZY_RW,
				remove_backpointer(trans, inode));
			if (ret) {
				bch_err(c, "error removing dirent: %i", ret);
				break;
			}

			ret = reattach_inode(trans, inode, snapshot);
		}
	}
fsck_err:
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
/*
 * Check for unreachable inodes, as well as loops in the directory structure:
 * After bch2_check_dirents(), if an inode backpointer doesn't exist that means
 * it's unreachable:
 */
int bch2_check_directory_structure(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_inode_unpacked u;
	pathbuf path = { 0, };
	int ret;

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_inodes, POS_MIN,
			   BTREE_ITER_INTENT|
			   BTREE_ITER_PREFETCH|
			   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
		if (!bkey_is_inode(k.k))
			continue;

		ret = bch2_inode_unpack(k, &u);
		if (ret) {
			/* Should have been caught earlier in fsck: */
			bch_err(c, "error unpacking inode %llu: %i", k.k->p.offset, ret);
			break;
		}

		if (u.bi_flags & BCH_INODE_UNLINKED)
			continue;

		ret = check_path(&trans, &path, &u, iter.pos.snapshot);
		if (ret)
			break;
	}
	bch2_trans_iter_exit(&trans, &iter);
	bch2_trans_exit(&trans);
	darray_exit(&path);

	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
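/*
 * nlink_table collects the hardlinkable inodes in the range currently being
 * processed, kept in (inum, snapshot) order; count is filled in by walking
 * dirents:
 */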
struct nlink_table {
	size_t		nr;
	size_t		size;

	struct nlink {
		u64	inum;
		u32	snapshot;
		u32	count;
	}		*d;
};
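/* Add an entry to the nlink table, growing the backing array as needed: */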
static int add_nlink(struct bch_fs *c, struct nlink_table *t,
		     u64 inum, u32 snapshot)
{
	if (t->nr == t->size) {
		size_t new_size = max_t(size_t, 128UL, t->size * 2);
		void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL);

		if (!d) {
			bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
				new_size);
			return -BCH_ERR_ENOMEM_fsck_add_nlink;
		}

		if (t->d)
			memcpy(d, t->d, t->size * sizeof(t->d[0]));
		kvfree(t->d);

		t->d = d;
		t->size = new_size;
	}

	t->d[t->nr++] = (struct nlink) {
		.inum		= inum,
		.snapshot	= snapshot,
	};

	return 0;
}

static int nlink_cmp(const void *_l, const void *_r)
{
	const struct nlink *l = _l;
	const struct nlink *r = _r;

	return cmp_int(l->inum, r->inum) ?: cmp_int(l->snapshot, r->snapshot);
}
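/*
 * Bump the link count for the inode a dirent points at, if it falls within
 * the range currently being processed and the reference is visible in the
 * link's snapshot:
 */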
static void inc_link(struct bch_fs *c, struct snapshots_seen *s,
		     struct nlink_table *links,
		     u64 range_start, u64 range_end, u64 inum, u32 snapshot)
{
	struct nlink *link, key = {
		.inum = inum, .snapshot = U32_MAX,
	};

	if (inum < range_start || inum >= range_end)
		return;

	link = __inline_bsearch(&key, links->d, links->nr,
				sizeof(links->d[0]), nlink_cmp);
	if (!link)
		return;

	while (link > links->d && link[0].inum == link[-1].inum)
		--link;

	for (; link < links->d + links->nr && link->inum == inum; link++)
		if (ref_visible(c, s, snapshot, link->snapshot)) {
			link->count++;
			if (link->snapshot >= snapshot)
				break;
		}
}
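/*
 * First pass of the nlink check: collect every inode that can be hardlinked
 * (non-directories with a nonzero link count) into the nlink table. If the
 * table can't be grown further, *end records where the next pass of the outer
 * loop should resume.
 */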
noinline_for_stack
static int check_nlinks_find_hardlinks(struct bch_fs *c,
				       struct nlink_table *t,
				       u64 start, u64 *end)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_inode_unpacked u;
	int ret = 0;

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_inodes,
			   POS(0, start),
			   BTREE_ITER_INTENT|
			   BTREE_ITER_PREFETCH|
			   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
		if (!bkey_is_inode(k.k))
			continue;

		/* Should never fail, checked by bch2_inode_invalid: */
		BUG_ON(bch2_inode_unpack(k, &u));

		/*
		 * Backpointer and directory structure checks are sufficient for
		 * directories, since they can't have hardlinks:
		 */
		if (S_ISDIR(u.bi_mode))
			continue;

		if (!u.bi_nlink)
			continue;

		ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot);
		if (ret) {
			*end = k.k->p.offset;
			ret = 0;
			break;
		}
	}
	bch2_trans_iter_exit(&trans, &iter);
	bch2_trans_exit(&trans);

	if (ret)
		bch_err(c, "error in fsck: btree error %i while walking inodes", ret);

	return ret;
}
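/*
 * Second pass: walk every dirent and count how many visible links point at
 * each inode collected in the first pass:
 */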
noinline_for_stack
static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links,
				     u64 range_start, u64 range_end)
{
	struct btree_trans trans;
	struct snapshots_seen s;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_s_c_dirent d;
	int ret;

	snapshots_seen_init(&s);

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_dirents, POS_MIN,
			   BTREE_ITER_INTENT|
			   BTREE_ITER_PREFETCH|
			   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
		ret = snapshots_seen_update(c, &s, iter.btree_id, k.k->p);
		if (ret)
			break;

		switch (k.k->type) {
		case KEY_TYPE_dirent:
			d = bkey_s_c_to_dirent(k);

			if (d.v->d_type != DT_DIR &&
			    d.v->d_type != DT_SUBVOL)
				inc_link(c, &s, links, range_start, range_end,
					 le64_to_cpu(d.v->d_inum),
					 bch2_snapshot_equiv(c, d.k->p.snapshot));
			break;
		}
	}
	bch2_trans_iter_exit(&trans, &iter);

	if (ret)
		bch_err(c, "error in fsck: btree error %i while walking dirents", ret);

	bch2_trans_exit(&trans);
	snapshots_seen_exit(&s);
	return ret;
}
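/*
 * Compare an inode's i_nlink against the count accumulated in the nlink
 * table, fixing it if they disagree; returns 1 to stop iteration once past
 * the end of the current range:
 */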
static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_iter *iter,
				     struct bkey_s_c k,
				     struct nlink_table *links,
				     size_t *idx, u64 range_end)
{
	struct bch_fs *c = trans->c;
	struct bch_inode_unpacked u;
	struct nlink *link = &links->d[*idx];
	int ret = 0;

	if (k.k->p.offset >= range_end)
		return 1;

	if (!bkey_is_inode(k.k))
		return 0;

	BUG_ON(bch2_inode_unpack(k, &u));

	if (S_ISDIR(u.bi_mode))
		return 0;

	if (!u.bi_nlink)
		return 0;

	while ((cmp_int(link->inum, k.k->p.offset) ?:
		cmp_int(link->snapshot, k.k->p.snapshot)) < 0) {
		BUG_ON(*idx == links->nr);
		link = &links->d[++*idx];
	}

	if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count, c,
			"inode %llu type %s has wrong i_nlink (%u, should be %u)",
			u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)],
			bch2_inode_nlink_get(&u), link->count)) {
		bch2_inode_nlink_set(&u, link->count);
		ret = __write_inode(trans, &u, k.k->p.snapshot);
	}
fsck_err:
	return ret;
}
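/*
 * Third pass: walk the inodes btree again and apply the counts from the
 * nlink table:
 */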
noinline_for_stack
static int check_nlinks_update_hardlinks(struct bch_fs *c,
					 struct nlink_table *links,
					 u64 range_start, u64 range_end)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	size_t idx = 0;
	int ret = 0;

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);

	ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_inodes,
			POS(0, range_start),
			BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
			NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
		check_nlinks_update_inode(&trans, &iter, k, links, &idx, range_end));

	bch2_trans_exit(&trans);

	if (ret < 0) {
		bch_err(c, "error in fsck: btree error %i while walking inodes", ret);
		return ret;
	}

	return 0;
}
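/*
 * Check inode link counts, processing the inode space in ranges sized to
 * whatever the nlink table could hold:
 */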
int bch2_check_nlinks(struct bch_fs *c)
{
	struct nlink_table links = { 0 };
	u64 this_iter_range_start, next_iter_range_start = 0;
	int ret = 0;

	do {
		this_iter_range_start = next_iter_range_start;
		next_iter_range_start = U64_MAX;

		ret = check_nlinks_find_hardlinks(c, &links,
						  this_iter_range_start,
						  &next_iter_range_start);
		if (ret)
			break;

		ret = check_nlinks_walk_dirents(c, &links,
						this_iter_range_start,
						next_iter_range_start);
		if (ret)
			break;

		ret = check_nlinks_update_hardlinks(c, &links,
						    this_iter_range_start,
						    next_iter_range_start);
		if (ret)
			break;

		links.nr = 0;
	} while (next_iter_range_start != U64_MAX);

	kvfree(links.d);

	if (ret)
		bch_err_fn(c, ret);

	return ret;
}
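/*
 * Reset a reflink pointer's front_pad/back_pad fields to zero; only run on
 * filesystems predating the reflink_p fix (see bch2_fix_reflink_p() below):
 */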
static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
			     struct bkey_s_c k)
{
	struct bkey_s_c_reflink_p p;
	struct bkey_i_reflink_p *u;
	int ret;

	if (k.k->type != KEY_TYPE_reflink_p)
		return 0;

	p = bkey_s_c_to_reflink_p(k);

	if (!p.v->front_pad && !p.v->back_pad)
		return 0;

	u = bch2_trans_kmalloc(trans, sizeof(*u));
	ret = PTR_ERR_OR_ZERO(u);
	if (ret)
		return ret;

	bkey_reassemble(&u->k_i, k);
	u->v.front_pad	= 0;
	u->v.back_pad	= 0;

	return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_NORUN);
}
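/*
 * Walk all extents and clear stale reflink_p padding fields; skipped entirely
 * once the superblock version includes the reflink_p fix:
 */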
int bch2_fix_reflink_p(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix)
		return 0;

	ret = bch2_trans_run(c,
		for_each_btree_key_commit(&trans, iter,
				BTREE_ID_extents, POS_MIN,
				BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|
				BTREE_ITER_ALL_SNAPSHOTS, k,
				NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
			fix_reflink_p_key(&trans, &iter, k)));

	if (ret)
		bch_err_fn(c, ret);

	return ret;
}