2017-03-16 22:18:50 -08:00
// SPDX-License-Identifier: GPL-2.0
# include "bcachefs.h"
2022-03-17 20:51:27 -04:00
# include "backpointers.h"
2017-03-16 22:18:50 -08:00
# include "bkey_methods.h"
2023-10-19 22:49:08 -04:00
# include "btree_cache.h"
2017-03-16 22:18:50 -08:00
# include "btree_types.h"
2018-10-06 00:46:55 -04:00
# include "alloc_background.h"
2017-03-16 22:18:50 -08:00
# include "dirent.h"
2023-12-27 18:31:46 -05:00
# include "disk_accounting.h"
2018-11-01 15:13:19 -04:00
# include "ec.h"
2017-03-16 22:18:50 -08:00
# include "error.h"
# include "extents.h"
# include "inode.h"
2023-09-10 16:42:30 -04:00
# include "io_misc.h"
2021-12-05 00:31:54 -05:00
# include "lru.h"
2017-03-16 22:18:50 -08:00
# include "quota.h"
2019-08-16 09:59:56 -04:00
# include "reflink.h"
2023-08-16 16:54:33 -04:00
# include "snapshot.h"
2021-03-16 00:42:25 -04:00
# include "subvolume.h"
2017-03-16 22:18:50 -08:00
# include "xattr.h"
2019-07-16 12:23:04 -04:00
/*
 * Human-readable name for each key type, indexed by KEY_TYPE_*; generated
 * from BCH_BKEY_TYPES() so it stays in sync with the on-disk enum.
 */
const char * const bch2_bkey_types[] = {
#define x(name, nr) #name,
	BCH_BKEY_TYPES()
#undef x
	NULL
};
2023-10-24 20:44:36 -04:00
/* Deleted keys carry no value: nothing to validate. */
static int deleted_key_invalid(struct bch_fs *c, struct bkey_s_c k,
			       enum bch_validate_flags flags, struct printbuf *err)
{
	return 0;
}
2022-10-22 15:59:53 -04:00
/* deleted and whiteout keys are valueless; share the trivial validator */
#define bch2_bkey_ops_deleted ((struct bkey_ops) {	\
	.key_invalid = deleted_key_invalid,		\
})

#define bch2_bkey_ops_whiteout ((struct bkey_ops) {	\
	.key_invalid = deleted_key_invalid,		\
})
2017-03-16 22:18:50 -08:00
2023-10-24 20:44:36 -04:00
/*
 * Shared validator for key types whose value must be empty: flags any
 * key with a nonzero value size.
 */
static int empty_val_key_invalid(struct bch_fs *c, struct bkey_s_c k,
				 enum bch_validate_flags flags, struct printbuf *err)
{
	int ret = 0;

	/* bkey_fsck_err_on() jumps to fsck_err with ret set on failure */
	bkey_fsck_err_on(bkey_val_bytes(k.k), c, err,
			 bkey_val_size_nonzero,
			 "incorrect value size (%zu != 0)",
			 bkey_val_bytes(k.k));
fsck_err:
	return ret;
}
2017-03-16 22:18:50 -08:00
2022-10-22 15:59:53 -04:00
/* error keys have no value */
#define bch2_bkey_ops_error ((struct bkey_ops) {	\
	.key_invalid = empty_val_key_invalid,		\
})
2017-03-16 22:18:50 -08:00
2023-10-24 20:44:36 -04:00
/* cookie value size is enforced via .min_val_size; nothing else to check */
static int key_type_cookie_invalid(struct bch_fs *c, struct bkey_s_c k,
				   enum bch_validate_flags flags, struct printbuf *err)
{
	return 0;
}
2024-01-15 14:15:03 -05:00
static void key_type_cookie_to_text ( struct printbuf * out , struct bch_fs * c ,
struct bkey_s_c k )
{
struct bkey_s_c_cookie ck = bkey_s_c_to_cookie ( k ) ;
prt_printf ( out , " %llu " , le64_to_cpu ( ck . v - > cookie ) ) ;
}
2022-10-22 15:59:53 -04:00
#define bch2_bkey_ops_cookie ((struct bkey_ops) {	\
	.key_invalid	= key_type_cookie_invalid,	\
	.val_to_text	= key_type_cookie_to_text,	\
	.min_val_size	= 8,				\
})

/* hash whiteouts have no value */
#define bch2_bkey_ops_hash_whiteout ((struct bkey_ops) {\
	.key_invalid = empty_val_key_invalid,		\
})
2018-11-01 15:10:01 -04:00
2023-10-24 20:44:36 -04:00
/* inline data is opaque: any value size is legal */
static int key_type_inline_data_invalid(struct bch_fs *c, struct bkey_s_c k,
					enum bch_validate_flags flags, struct printbuf *err)
{
	return 0;
}
static void key_type_inline_data_to_text ( struct printbuf * out , struct bch_fs * c ,
struct bkey_s_c k )
{
2020-10-24 19:51:34 -04:00
struct bkey_s_c_inline_data d = bkey_s_c_to_inline_data ( k ) ;
unsigned datalen = bkey_inline_data_bytes ( k . k ) ;
2023-02-03 21:01:40 -05:00
prt_printf ( out , " datalen %u: %*phN " ,
2020-10-24 19:51:34 -04:00
datalen , min ( datalen , 32U ) , d . v - > data ) ;
2019-11-09 16:43:16 -05:00
}
2022-10-22 15:59:53 -04:00
#define bch2_bkey_ops_inline_data ((struct bkey_ops) {	\
	.key_invalid	= key_type_inline_data_invalid,	\
	.val_to_text	= key_type_inline_data_to_text,	\
})
2019-11-09 16:43:16 -05:00
2022-01-05 22:13:13 -05:00
/* KEY_TYPE_set extents merge unconditionally: grow the left key. */
static bool key_type_set_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
{
	unsigned new_size = l.k->size + r.k->size;

	bch2_key_resize(l.k, new_size);
	return true;
}
2022-10-22 15:59:53 -04:00
#define bch2_bkey_ops_set ((struct bkey_ops) {		\
	.key_invalid	= empty_val_key_invalid,	\
	.key_merge	= key_type_set_merge,		\
})
2022-01-05 22:13:13 -05:00
2021-04-28 23:49:30 -04:00
/*
 * Per-key-type operations table, indexed by KEY_TYPE_*; generated from
 * BCH_BKEY_TYPES() so every defined type gets its bch2_bkey_ops_* entry.
 */
const struct bkey_ops bch2_bkey_ops[] = {
#define x(name, nr) [KEY_TYPE_##name] = bch2_bkey_ops_##name,
	BCH_BKEY_TYPES()
#undef x
};

/* Fallback ops for unknown key types: every hook NULL. */
const struct bkey_ops bch2_bkey_null_ops = {
};
2022-04-03 21:50:25 -04:00
/*
 * Validate a key's value: enforce the per-type minimum value size, then
 * defer to the type's ->key_invalid() hook if it has one.
 *
 * Returns 0 if valid; nonzero fsck error code otherwise, with an
 * explanation appended to @err.
 */
int bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bch_validate_flags flags,
			  struct printbuf *err)
{
	/* validation can be globally disabled (e.g. for fuzzing/recovery) */
	if (test_bit(BCH_FS_no_invalid_checks, &c->flags))
		return 0;

	const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
	int ret = 0;

	bkey_fsck_err_on(bkey_val_bytes(k.k) < ops->min_val_size, c, err,
			 bkey_val_size_too_small,
			 "bad val size (%zu < %u)",
			 bkey_val_bytes(k.k), ops->min_val_size);

	if (!ops->key_invalid)
		return 0;

	ret = ops->key_invalid(c, k, flags, err);
fsck_err:
	return ret;
}
2017-03-16 22:18:50 -08:00
2023-07-21 05:38:45 -04:00
/*
 * Bitmask of KEY_TYPE_*s permitted in each btree, indexed by BKEY_TYPE_*.
 * Interior nodes (BKEY_TYPE_btree) are listed explicitly; leaf btrees get
 * their masks from BCH_BTREE_IDS(). KEY_TYPE_deleted is always allowed.
 */
static u64 bch2_key_types_allowed[] = {
	[BKEY_TYPE_btree] =
		BIT_ULL(KEY_TYPE_deleted)|
		BIT_ULL(KEY_TYPE_btree_ptr)|
		BIT_ULL(KEY_TYPE_btree_ptr_v2),
#define x(name, nr, flags, keys) [BKEY_TYPE_##name] = BIT_ULL(KEY_TYPE_deleted)|keys,
	BCH_BTREE_IDS()
#undef x
};
2023-10-20 00:01:53 -04:00
/* Name a btree node type for error messages. */
const char *bch2_btree_node_type_str(enum btree_node_type type)
{
	if (type == BKEY_TYPE_btree)
		return "internal btree node";

	/* leaf node types are offset by one from the corresponding btree id */
	return bch2_btree_id_str(type - 1);
}
2022-04-03 17:50:01 -04:00
/*
 * Validate the key (header) portion of @k for btree node type @type:
 * bkey size, whether the key type is allowed in this btree, extent size
 * rules, and snapshot-field rules.  Does not validate the value — see
 * bch2_bkey_val_invalid().
 */
int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
			enum btree_node_type type,
			enum bch_validate_flags flags,
			struct printbuf *err)
{
	/* validation can be globally disabled (e.g. for fuzzing/recovery) */
	if (test_bit(BCH_FS_no_invalid_checks, &c->flags))
		return 0;

	int ret = 0;

	bkey_fsck_err_on(k.k->u64s < BKEY_U64s, c, err,
			 bkey_u64s_too_small,
			 "u64s too small (%u < %zu)", k.k->u64s, BKEY_U64s);

	/* unknown node types get no further checks */
	if (type >= BKEY_TYPE_NR)
		return 0;

	/*
	 * Only enforce the allowed-types mask for interior nodes and at
	 * transaction commit; unknown (future) key types are let through.
	 */
	bkey_fsck_err_on(k.k->type < KEY_TYPE_MAX &&
			 (type == BKEY_TYPE_btree || (flags & BCH_VALIDATE_commit)) &&
			 !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type)), c, err,
			 bkey_invalid_type_for_btree,
			 "invalid key type for btree %s (%s)",
			 bch2_btree_node_type_str(type),
			 k.k->type < KEY_TYPE_MAX
			 ? bch2_bkey_types[k.k->type]
			 : "(unknown)");

	if (btree_node_type_is_extents(type) && !bkey_whiteout(k.k)) {
		/* extents must have nonzero size and must not underflow */
		bkey_fsck_err_on(k.k->size == 0, c, err,
				 bkey_extent_size_zero,
				 "size == 0");

		bkey_fsck_err_on(k.k->size > k.k->p.offset, c, err,
				 bkey_extent_size_greater_than_offset,
				 "size greater than offset (%u > %llu)",
				 k.k->size, k.k->p.offset);
	} else {
		/* non-extent keys never have a size */
		bkey_fsck_err_on(k.k->size, c, err,
				 bkey_size_nonzero,
				 "size != 0");
	}

	if (type != BKEY_TYPE_btree) {
		/* leaf node types are offset by one from the btree id */
		enum btree_id btree = type - 1;

		if (btree_type_has_snapshots(btree)) {
			bkey_fsck_err_on(!k.k->p.snapshot, c, err,
					 bkey_snapshot_zero,
					 "snapshot == 0");
		} else if (!btree_type_has_snapshot_field(btree)) {
			bkey_fsck_err_on(k.k->p.snapshot, c, err,
					 bkey_snapshot_nonzero,
					 "nonzero snapshot");
		} else {
			/*
			 * btree uses snapshot field but it's not required to be
			 * nonzero
			 */
		}

		bkey_fsck_err_on(bkey_eq(k.k->p, POS_MAX), c, err,
				 bkey_at_pos_max,
				 "key at POS_MAX");
	}
fsck_err:
	return ret;
}
2022-04-03 17:50:01 -04:00
int bch2_bkey_invalid ( struct bch_fs * c , struct bkey_s_c k ,
enum btree_node_type type ,
2024-05-08 18:40:42 -04:00
enum bch_validate_flags flags ,
2023-07-06 21:16:10 -04:00
struct printbuf * err )
2017-03-16 22:18:50 -08:00
{
2022-12-20 19:58:16 -05:00
return __bch2_bkey_invalid ( c , k , type , flags , err ) ? :
bch2_bkey_val_invalid ( c , k , flags , err ) ;
2017-03-16 22:18:50 -08:00
}
2023-10-24 20:44:36 -04:00
/*
 * Check that @k lies within btree node @b's [min_key, max_key] range.
 * Returns 0 if in range, a fsck error code otherwise.
 */
int bch2_bkey_in_btree_node(struct bch_fs *c, struct btree *b,
			    struct bkey_s_c k, struct printbuf *err)
{
	int ret = 0;

	bkey_fsck_err_on(bpos_lt(k.k->p, b->data->min_key), c, err,
			 bkey_before_start_of_btree_node,
			 "key before start of btree node");

	bkey_fsck_err_on(bpos_gt(k.k->p, b->data->max_key), c, err,
			 bkey_after_end_of_btree_node,
			 "key past end of btree node");
fsck_err:
	return ret;
}
2018-11-09 01:24:07 -05:00
void bch2_bpos_to_text ( struct printbuf * out , struct bpos pos )
2018-07-23 09:13:07 -04:00
{
2022-11-24 03:12:22 -05:00
if ( bpos_eq ( pos , POS_MIN ) )
2023-02-03 21:01:40 -05:00
prt_printf ( out , " POS_MIN " ) ;
2022-11-24 03:12:22 -05:00
else if ( bpos_eq ( pos , POS_MAX ) )
2023-02-03 21:01:40 -05:00
prt_printf ( out , " POS_MAX " ) ;
2022-11-24 03:12:22 -05:00
else if ( bpos_eq ( pos , SPOS_MAX ) )
2023-02-03 21:01:40 -05:00
prt_printf ( out , " SPOS_MAX " ) ;
2021-03-22 15:50:02 -04:00
else {
if ( pos . inode = = U64_MAX )
2023-02-03 21:01:40 -05:00
prt_printf ( out , " U64_MAX " ) ;
2021-03-22 15:50:02 -04:00
else
2023-02-03 21:01:40 -05:00
prt_printf ( out , " %llu " , pos . inode ) ;
prt_printf ( out , " : " ) ;
2021-03-22 15:50:02 -04:00
if ( pos . offset = = U64_MAX )
2023-02-03 21:01:40 -05:00
prt_printf ( out , " U64_MAX " ) ;
2021-03-22 15:50:02 -04:00
else
2023-02-03 21:01:40 -05:00
prt_printf ( out , " %llu " , pos . offset ) ;
prt_printf ( out , " : " ) ;
2021-03-22 15:50:02 -04:00
if ( pos . snapshot = = U32_MAX )
2023-02-03 21:01:40 -05:00
prt_printf ( out , " U32_MAX " ) ;
2021-03-22 15:50:02 -04:00
else
2023-02-03 21:01:40 -05:00
prt_printf ( out , " %u " , pos . snapshot ) ;
2021-03-22 15:50:02 -04:00
}
2018-07-23 09:13:07 -04:00
}
2018-11-09 01:24:07 -05:00
void bch2_bkey_to_text ( struct printbuf * out , const struct bkey * k )
2017-03-16 22:18:50 -08:00
{
2020-05-25 21:25:31 -04:00
if ( k ) {
2023-02-03 21:01:40 -05:00
prt_printf ( out , " u64s %u type " , k - > u64s ) ;
2020-11-13 15:03:34 -05:00
if ( k - > type < KEY_TYPE_MAX )
2023-02-03 21:01:40 -05:00
prt_printf ( out , " %s " , bch2_bkey_types [ k - > type ] ) ;
2020-11-13 15:03:34 -05:00
else
2023-02-03 21:01:40 -05:00
prt_printf ( out , " %u " , k - > type ) ;
2017-03-16 22:18:50 -08:00
2020-05-25 21:25:31 -04:00
bch2_bpos_to_text ( out , k - > p ) ;
2017-03-16 22:18:50 -08:00
2023-02-03 21:01:40 -05:00
prt_printf ( out , " len %u ver %llu " , k - > size , k - > version . lo ) ;
2020-05-25 21:25:31 -04:00
} else {
2023-02-03 21:01:40 -05:00
prt_printf ( out , " (null) " ) ;
2020-05-25 21:25:31 -04:00
}
2017-03-16 22:18:50 -08:00
}
2018-11-09 01:24:07 -05:00
/*
 * Print @k's value via its type's ->val_to_text() hook; prints nothing
 * for types without one.
 */
void bch2_val_to_text(struct printbuf *out, struct bch_fs *c,
		      struct bkey_s_c k)
{
	const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);

	if (likely(ops->val_to_text))
		ops->val_to_text(out, c, k);
}
2018-11-09 01:24:07 -05:00
/* Print @k's header followed by its value, if the value is nonempty. */
void bch2_bkey_val_to_text(struct printbuf *out, struct bch_fs *c,
			   struct bkey_s_c k)
{
	bch2_bkey_to_text(out, k.k);

	if (bkey_val_bytes(k.k)) {
		prt_printf(out, ": ");
		bch2_val_to_text(out, c, k);
	}
}
2020-02-06 20:15:15 -05:00
/* Byte-swap @k's value via the type's ->swab() hook, if it has one. */
void bch2_bkey_swab_val(struct bkey_s k)
{
	const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);

	if (ops->swab)
		ops->swab(k);
}
2018-11-01 15:10:01 -04:00
bool bch2_bkey_normalize ( struct bch_fs * c , struct bkey_s k )
{
2023-07-06 19:23:27 -04:00
const struct bkey_ops * ops = bch2_bkey_type_ops ( k . k - > type ) ;
2018-11-01 15:10:01 -04:00
return ops - > key_normalize
? ops - > key_normalize ( c , k )
: false ;
}
2021-04-28 23:49:30 -04:00
/*
 * Try to merge @r into @l; returns true on success.  The &&-chain order
 * matters: cheap structural checks short-circuit before the type's
 * ->key_merge() hook (which may mutate @l) runs.
 */
bool bch2_bkey_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
{
	const struct bkey_ops *ops = bch2_bkey_type_ops(l.k->type);

	return ops->key_merge &&
		bch2_bkey_maybe_mergable(l.k, r.k) &&
		/* merged size must not overflow the on-disk size field */
		(u64) l.k->size + r.k->size <= KEY_SIZE_MAX &&
		!bch2_key_merging_disabled &&
		ops->key_merge(c, l, r);
}
/*
 * Translation table between pre-bkey_renumber on-disk type numbers
 * (old) and current KEY_TYPE_* values (new), per btree node type.
 */
static const struct old_bkey_type {
	u8		btree_node_type;
	u8		old;
	u8		new;
} bkey_renumber_table[] = {
	{BKEY_TYPE_btree,	128, KEY_TYPE_btree_ptr		},
	{BKEY_TYPE_extents,	128, KEY_TYPE_extent		},
	{BKEY_TYPE_extents,	129, KEY_TYPE_extent		},
	{BKEY_TYPE_extents,	130, KEY_TYPE_reservation	},
	{BKEY_TYPE_inodes,	128, KEY_TYPE_inode		},
	{BKEY_TYPE_inodes,	130, KEY_TYPE_inode_generation	},
	{BKEY_TYPE_dirents,	128, KEY_TYPE_dirent		},
	{BKEY_TYPE_dirents,	129, KEY_TYPE_hash_whiteout	},
	{BKEY_TYPE_xattrs,	128, KEY_TYPE_xattr		},
	{BKEY_TYPE_xattrs,	129, KEY_TYPE_hash_whiteout	},
	{BKEY_TYPE_alloc,	128, KEY_TYPE_alloc		},
	{BKEY_TYPE_quotas,	128, KEY_TYPE_quota		},
};
void bch2_bkey_renumber ( enum btree_node_type btree_node_type ,
struct bkey_packed * k ,
int write )
{
const struct old_bkey_type * i ;
for ( i = bkey_renumber_table ;
i < bkey_renumber_table + ARRAY_SIZE ( bkey_renumber_table ) ;
i + + )
if ( btree_node_type = = i - > btree_node_type & &
k - > type = = ( write ? i - > new : i - > old ) ) {
k - > type = write ? i - > old : i - > new ;
break ;
}
}
2020-01-07 13:29:32 -05:00
/*
 * Apply on-disk format compatibility transforms to @k, in place.
 *
 * Five numbered compat steps run in order on the read path and in
 * reverse order on the write path, so that write exactly inverts read:
 *
 *  0: byte-swap the key if the on-disk endianness differs from the CPU's
 *  1: renumber key types written before bcachefs_metadata_version_bkey_renumber
 *  2: swap inode/offset fields for pre-inode_btree_change inode keys
 *  3: fill in / strip the snapshot field for pre-snapshot metadata
 *  4: byte-swap the value and run the type's ->compat() hook
 */
void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
			unsigned version, unsigned big_endian,
			int write,
			struct bkey_format *f,
			struct bkey_packed *k)
{
	const struct bkey_ops *ops;
	struct bkey uk;
	unsigned nr_compat = 5;
	int i;

	/*
	 * Do these operations in reverse order in the write path:
	 */

	for (i = 0; i < nr_compat; i++)
	switch (!write ? i : nr_compat - 1 - i) {
	case 0:
		if (big_endian != CPU_BIG_ENDIAN) {
			bch2_bkey_swab_key(f, k);
		} else if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
			/* debug builds: double-swap to exercise the swab path */
			bch2_bkey_swab_key(f, k);
			bch2_bkey_swab_key(f, k);
		}
		break;
	case 1:
		if (version < bcachefs_metadata_version_bkey_renumber)
			bch2_bkey_renumber(__btree_node_type(level, btree_id), k, write);
		break;
	case 2:
		if (version < bcachefs_metadata_version_inode_btree_change &&
		    btree_id == BTREE_ID_inodes) {
			if (!bkey_packed(k)) {
				/* unpacked key: swap the fields directly */
				struct bkey_i *u = packed_to_bkey(k);

				swap(u->k.p.inode, u->k.p.offset);
			} else if (f->bits_per_field[BKEY_FIELD_INODE] &&
				   f->bits_per_field[BKEY_FIELD_OFFSET]) {
				/*
				 * packed key: unpack with one format, swap,
				 * repack with the field-swapped format (swap
				 * in/out formats depending on direction)
				 */
				struct bkey_format tmp = *f, *in = f, *out = &tmp;

				swap(tmp.bits_per_field[BKEY_FIELD_INODE],
				     tmp.bits_per_field[BKEY_FIELD_OFFSET]);
				swap(tmp.field_offset[BKEY_FIELD_INODE],
				     tmp.field_offset[BKEY_FIELD_OFFSET]);

				if (!write)
					swap(in, out);

				uk = __bch2_bkey_unpack_key(in, k);
				swap(uk.p.inode, uk.p.offset);
				BUG_ON(!bch2_bkey_pack_key(k, &uk, out));
			}
		}
		break;
	case 3:
		if (version < bcachefs_metadata_version_snapshot &&
		    (level || btree_type_has_snapshots(btree_id))) {
			struct bkey_i *u = packed_to_bkey(k);

			if (u) {
				/* unpacked: 0 on write (old format), U32_MAX on read */
				u->k.p.snapshot = write
					? 0 : U32_MAX;
			} else {
				/*
				 * packed: clamp to what the format can
				 * represent so repacking can't fail
				 */
				u64 min_packed = le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]);
				u64 max_packed = min_packed +
					~(~0ULL << f->bits_per_field[BKEY_FIELD_SNAPSHOT]);

				uk = __bch2_bkey_unpack_key(f, k);
				uk.p.snapshot = write
					? min_packed : min_t(u64, U32_MAX, max_packed);

				BUG_ON(!bch2_bkey_pack_key(k, &uk, f));
			}
		}
		break;
	case 4: {
		struct bkey_s u;

		if (!bkey_packed(k)) {
			u = bkey_i_to_s(packed_to_bkey(k));
		} else {
			uk = __bch2_bkey_unpack_key(f, k);

			u.k = &uk;
			u.v = bkeyp_val(f, k);
		}

		if (big_endian != CPU_BIG_ENDIAN)
			bch2_bkey_swab_val(u);

		ops = bch2_bkey_type_ops(k->type);

		if (ops->compat)
			ops->compat(btree_id, version, big_endian, write, u);
		break;
	}
	default:
		BUG();
	}
}