// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_types.h"
#include "alloc_background.h"
#include "dirent.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "inode.h"
#include "lru.h"
#include "quota.h"
#include "reflink.h"
#include "subvolume.h"
#include "xattr.h"
const char * const bch2_bkey_types[] = {
#define x(name, nr) #name,
	BCH_BKEY_TYPES()
#undef x
	NULL
};

static int deleted_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
			       int rw, struct printbuf *err)
{
	return 0;
}

#define bch2_bkey_ops_deleted ((struct bkey_ops) {	\
	.key_invalid = deleted_key_invalid,		\
})

#define bch2_bkey_ops_whiteout ((struct bkey_ops) {	\
	.key_invalid = deleted_key_invalid,		\
})

static int empty_val_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
				 int rw, struct printbuf *err)
{
	if (bkey_val_bytes(k.k)) {
		prt_printf(err, "incorrect value size (%zu != 0)",
			   bkey_val_bytes(k.k));
		return -EINVAL;
	}

	return 0;
}

#define bch2_bkey_ops_error ((struct bkey_ops) {	\
	.key_invalid = empty_val_key_invalid,		\
})

static int key_type_cookie_invalid(const struct bch_fs *c, struct bkey_s_c k,
				   int rw, struct printbuf *err)
{
	if (bkey_val_bytes(k.k) != sizeof(struct bch_cookie)) {
		prt_printf(err, "incorrect value size (%zu != %zu)",
			   bkey_val_bytes(k.k), sizeof(struct bch_cookie));
		return -EINVAL;
	}

	return 0;
}

#define bch2_bkey_ops_cookie ((struct bkey_ops) {	\
	.key_invalid = key_type_cookie_invalid,		\
})

#define bch2_bkey_ops_hash_whiteout ((struct bkey_ops) {\
	.key_invalid = empty_val_key_invalid,		\
})

static int key_type_inline_data_invalid(const struct bch_fs *c, struct bkey_s_c k,
					int rw, struct printbuf *err)
{
	return 0;
}

static void key_type_inline_data_to_text(struct printbuf *out, struct bch_fs *c,
					 struct bkey_s_c k)
{
	struct bkey_s_c_inline_data d = bkey_s_c_to_inline_data(k);
	unsigned datalen = bkey_inline_data_bytes(k.k);

	prt_printf(out, "datalen %u: %*phN",
		   datalen, min(datalen, 32U), d.v->data);
}

#define bch2_bkey_ops_inline_data ((struct bkey_ops) {	\
	.key_invalid	= key_type_inline_data_invalid,	\
	.val_to_text	= key_type_inline_data_to_text,	\
})

static int key_type_set_invalid(const struct bch_fs *c, struct bkey_s_c k,
				int rw, struct printbuf *err)
{
	/*
	 * A set key carries no value; the error message previously referenced
	 * sizeof(struct bch_cookie), a copy-paste from the cookie check.
	 */
	if (bkey_val_bytes(k.k)) {
		prt_printf(err, "incorrect value size (%zu != 0)",
			   bkey_val_bytes(k.k));
		return -EINVAL;
	}

	return 0;
}

static bool key_type_set_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
{
	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}

#define bch2_bkey_ops_set ((struct bkey_ops) {		\
	.key_invalid	= key_type_set_invalid,		\
	.key_merge	= key_type_set_merge,		\
})
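
/*
 * Dispatch table, indexed by key type: each KEY_TYPE_##name slot is filled
 * from the corresponding bch2_bkey_ops_##name initializer, defined either
 * above or in the per-type headers (extents.h, inode.h, dirent.h, ...) for
 * key types implemented elsewhere.
 */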
const struct bkey_ops bch2_bkey_ops[] = {
#define x(name, nr) [KEY_TYPE_##name] = bch2_bkey_ops_##name,
	BCH_BKEY_TYPES()
#undef x
};

int bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k,
			  int rw, struct printbuf *err)
{
	if (k.k->type >= KEY_TYPE_MAX) {
		prt_printf(err, "invalid type (%u >= %u)", k.k->type, KEY_TYPE_MAX);
		return -EINVAL;
	}

	return bch2_bkey_ops[k.k->type].key_invalid(c, k, rw, err);
}
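
/*
 * Which key types are allowed in which btrees, as a bitmask indexed by btree
 * node type: bit n is set iff key type n may appear in that btree. A rough
 * sketch of how __bch2_bkey_invalid() consults it:
 *
 *	if (!(bch2_key_types_allowed[type] & (1U << k.k->type)))
 *		return -EINVAL;	// key type not valid for this btree
 */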
static unsigned bch2_key_types_allowed[] = {
	[BKEY_TYPE_extents] =
		(1U << KEY_TYPE_deleted)|
		(1U << KEY_TYPE_whiteout)|
		(1U << KEY_TYPE_error)|
		(1U << KEY_TYPE_cookie)|
		(1U << KEY_TYPE_extent)|
		(1U << KEY_TYPE_reservation)|
		(1U << KEY_TYPE_reflink_p)|
		(1U << KEY_TYPE_inline_data),
	[BKEY_TYPE_inodes] =
		(1U << KEY_TYPE_deleted)|
		(1U << KEY_TYPE_whiteout)|
		(1U << KEY_TYPE_inode)|
		(1U << KEY_TYPE_inode_v2)|
		(1U << KEY_TYPE_inode_generation),
	[BKEY_TYPE_dirents] =
		(1U << KEY_TYPE_deleted)|
		(1U << KEY_TYPE_whiteout)|
		(1U << KEY_TYPE_hash_whiteout)|
		(1U << KEY_TYPE_dirent),
	[BKEY_TYPE_xattrs] =
		(1U << KEY_TYPE_deleted)|
		(1U << KEY_TYPE_whiteout)|
		(1U << KEY_TYPE_cookie)|
		(1U << KEY_TYPE_hash_whiteout)|
		(1U << KEY_TYPE_xattr),
	[BKEY_TYPE_alloc] =
		(1U << KEY_TYPE_deleted)|
		(1U << KEY_TYPE_alloc)|
		(1U << KEY_TYPE_alloc_v2)|
		(1U << KEY_TYPE_alloc_v3)|
		(1U << KEY_TYPE_alloc_v4),
	[BKEY_TYPE_quotas] =
		(1U << KEY_TYPE_deleted)|
		(1U << KEY_TYPE_quota),
	[BKEY_TYPE_stripes] =
		(1U << KEY_TYPE_deleted)|
		(1U << KEY_TYPE_stripe),
	[BKEY_TYPE_reflink] =
		(1U << KEY_TYPE_deleted)|
		(1U << KEY_TYPE_reflink_v)|
		(1U << KEY_TYPE_indirect_inline_data),
	[BKEY_TYPE_subvolumes] =
		(1U << KEY_TYPE_deleted)|
		(1U << KEY_TYPE_subvolume),
	[BKEY_TYPE_snapshots] =
		(1U << KEY_TYPE_deleted)|
		(1U << KEY_TYPE_snapshot),
	[BKEY_TYPE_lru] =
		(1U << KEY_TYPE_deleted)|
		(1U << KEY_TYPE_lru),
	[BKEY_TYPE_freespace] =
		(1U << KEY_TYPE_deleted)|
		(1U << KEY_TYPE_set),
	[BKEY_TYPE_need_discard] =
		(1U << KEY_TYPE_deleted)|
		(1U << KEY_TYPE_set),
	[BKEY_TYPE_btree] =
		(1U << KEY_TYPE_deleted)|
		(1U << KEY_TYPE_btree_ptr)|
		(1U << KEY_TYPE_btree_ptr_v2),
};

int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
			enum btree_node_type type,
			int rw, struct printbuf *err)
{
	if (k.k->u64s < BKEY_U64s) {
		prt_printf(err, "u64s too small (%u < %zu)", k.k->u64s, BKEY_U64s);
		return -EINVAL;
	}

	if (!(bch2_key_types_allowed[type] & (1U << k.k->type))) {
		prt_printf(err, "invalid key type for btree %s (%s)",
			   bch2_btree_ids[type], bch2_bkey_types[k.k->type]);
		return -EINVAL;
	}

	if (btree_node_type_is_extents(type) && !bkey_whiteout(k.k)) {
		if (k.k->size == 0) {
			prt_printf(err, "size == 0");
			return -EINVAL;
		}

		if (k.k->size > k.k->p.offset) {
			prt_printf(err, "size greater than offset (%u > %llu)",
				   k.k->size, k.k->p.offset);
			return -EINVAL;
		}
	} else {
		if (k.k->size) {
			prt_printf(err, "size != 0");
			return -EINVAL;
		}
	}

	if (type != BKEY_TYPE_btree &&
	    !btree_type_has_snapshots(type) &&
	    k.k->p.snapshot) {
		prt_printf(err, "nonzero snapshot");
		return -EINVAL;
	}

	if (type != BKEY_TYPE_btree &&
	    btree_type_has_snapshots(type) &&
	    !k.k->p.snapshot) {
		prt_printf(err, "snapshot == 0");
		return -EINVAL;
	}

	if (type != BKEY_TYPE_btree &&
	    !bkey_cmp(k.k->p, POS_MAX)) {
		prt_printf(err, "key at POS_MAX");
		return -EINVAL;
	}

	return 0;
}

int bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
		      enum btree_node_type type,
		      int rw, struct printbuf *err)
{
	return __bch2_bkey_invalid(c, k, type, rw, err) ?:
		bch2_bkey_val_invalid(c, k, rw, err);
}

int bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k,
			    struct printbuf *err)
{
	if (bpos_cmp(k.k->p, b->data->min_key) < 0) {
		prt_printf(err, "key before start of btree node");
		return -EINVAL;
	}

	if (bpos_cmp(k.k->p, b->data->max_key) > 0) {
		prt_printf(err, "key past end of btree node");
		return -EINVAL;
	}

	return 0;
}
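
/*
 * Print a position as "inode:offset:snapshot", with the common sentinel
 * values spelled out. Illustrative example:
 *
 *	{ .inode = 1, .offset = 4096, .snapshot = U32_MAX } -> "1:4096:U32_MAX"
 */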
void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
{
	if (!bpos_cmp(pos, POS_MIN))
		prt_printf(out, "POS_MIN");
	else if (!bpos_cmp(pos, POS_MAX))
		prt_printf(out, "POS_MAX");
	else if (!bpos_cmp(pos, SPOS_MAX))
		prt_printf(out, "SPOS_MAX");
	else {
		if (pos.inode == U64_MAX)
			prt_printf(out, "U64_MAX");
		else
			prt_printf(out, "%llu", pos.inode);
		prt_printf(out, ":");
		if (pos.offset == U64_MAX)
			prt_printf(out, "U64_MAX");
		else
			prt_printf(out, "%llu", pos.offset);
		prt_printf(out, ":");
		if (pos.snapshot == U32_MAX)
			prt_printf(out, "U32_MAX");
		else
			prt_printf(out, "%u", pos.snapshot);
	}
}
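
/*
 * Print the fixed part of a bkey; for an extent this comes out roughly as
 * "u64s 8 type extent 1:4096:U32_MAX len 128 ver 0" (illustrative, see the
 * format strings below).
 */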
void bch2_bkey_to_text(struct printbuf *out, const struct bkey *k)
{
	if (k) {
		prt_printf(out, "u64s %u type ", k->u64s);

		if (k->type < KEY_TYPE_MAX)
			prt_printf(out, "%s ", bch2_bkey_types[k->type]);
		else
			prt_printf(out, "%u ", k->type);

		bch2_bpos_to_text(out, k->p);

		prt_printf(out, " len %u ver %llu", k->size, k->version.lo);
	} else {
		prt_printf(out, "(null)");
	}
}

void bch2_val_to_text(struct printbuf *out, struct bch_fs *c,
		      struct bkey_s_c k)
{
	if (k.k->type < KEY_TYPE_MAX) {
		const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];

		if (likely(ops->val_to_text))
			ops->val_to_text(out, c, k);
	} else {
		prt_printf(out, "(invalid type %u)", k.k->type);
	}
}

void bch2_bkey_val_to_text(struct printbuf *out, struct bch_fs *c,
			   struct bkey_s_c k)
{
	bch2_bkey_to_text(out, k.k);

	if (bkey_val_bytes(k.k)) {
		prt_printf(out, ": ");
		bch2_val_to_text(out, c, k);
	}
}

void bch2_bkey_swab_val(struct bkey_s k)
{
	const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];

	if (ops->swab)
		ops->swab(k);
}

bool bch2_bkey_normalize(struct bch_fs *c, struct bkey_s k)
{
	const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];

	return ops->key_normalize
		? ops->key_normalize(c, k)
		: false;
}
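
/*
 * Try to merge r into l. Merging only happens when both keys have the same
 * type with a key_merge op, the combined size fits in KEY_SIZE_MAX, and
 * merging isn't administratively disabled; e.g. two adjacent KEY_TYPE_set
 * keys are merged by key_type_set_merge() above simply resizing l to cover
 * both.
 */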
bool bch2_bkey_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
{
	const struct bkey_ops *ops = &bch2_bkey_ops[l.k->type];

	return bch2_bkey_maybe_mergable(l.k, r.k) &&
		(u64) l.k->size + r.k->size <= KEY_SIZE_MAX &&
		ops->key_merge &&
		!bch2_key_merging_disabled &&
		ops->key_merge(c, l, r);
}
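
/*
 * Before the bkey_renumber metadata version, key type numbers were per-btree
 * and started at 128; this table maps those old on-disk numbers to the
 * current global KEY_TYPE_* enum on read, and back again on write.
 */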
static const struct old_bkey_type {
	u8		btree_node_type;
	u8		old;
	u8		new;
} bkey_renumber_table[] = {
	{BKEY_TYPE_btree,	128, KEY_TYPE_btree_ptr		},
	{BKEY_TYPE_extents,	128, KEY_TYPE_extent		},
	{BKEY_TYPE_extents,	129, KEY_TYPE_extent		},
	{BKEY_TYPE_extents,	130, KEY_TYPE_reservation	},
	{BKEY_TYPE_inodes,	128, KEY_TYPE_inode		},
	{BKEY_TYPE_inodes,	130, KEY_TYPE_inode_generation	},
	{BKEY_TYPE_dirents,	128, KEY_TYPE_dirent		},
	{BKEY_TYPE_dirents,	129, KEY_TYPE_hash_whiteout	},
	{BKEY_TYPE_xattrs,	128, KEY_TYPE_xattr		},
	{BKEY_TYPE_xattrs,	129, KEY_TYPE_hash_whiteout	},
	{BKEY_TYPE_alloc,	128, KEY_TYPE_alloc		},
	{BKEY_TYPE_quotas,	128, KEY_TYPE_quota		},
};

void bch2_bkey_renumber(enum btree_node_type btree_node_type,
			struct bkey_packed *k,
			int write)
{
	const struct old_bkey_type *i;

	for (i = bkey_renumber_table;
	     i < bkey_renumber_table + ARRAY_SIZE(bkey_renumber_table);
	     i++)
		if (btree_node_type == i->btree_node_type &&
		    k->type == (write ? i->new : i->old)) {
			k->type = write ? i->old : i->new;
			break;
		}
}
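
/*
 * Compat filter applied when reading from (or writing to) older metadata
 * versions. nr_compat steps, applied in order on the read path and in
 * reverse on the write path:
 *
 *	0: byteswap the key if on-disk endianness doesn't match the CPU
 *	1: renumber pre-bkey_renumber key types
 *	2: swap p.inode/p.offset in the inodes btree (inode_btree_change)
 *	3: fill in p.snapshot for pre-snapshot metadata
 *	4: byteswap and compat the value via the per-type ops
 */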
void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
			unsigned version, unsigned big_endian,
			int write,
			struct bkey_format *f,
			struct bkey_packed *k)
{
	const struct bkey_ops *ops;
	struct bkey uk;
	struct bkey_s u;
	unsigned nr_compat = 5;
	int i;

	/*
	 * Do these operations in reverse order in the write path:
	 */

	for (i = 0; i < nr_compat; i++)
	switch (!write ? i : nr_compat - 1 - i) {
	case 0:
		if (big_endian != CPU_BIG_ENDIAN)
			bch2_bkey_swab_key(f, k);
		break;
	case 1:
		if (version < bcachefs_metadata_version_bkey_renumber)
			bch2_bkey_renumber(__btree_node_type(level, btree_id), k, write);
		break;
	case 2:
		if (version < bcachefs_metadata_version_inode_btree_change &&
		    btree_id == BTREE_ID_inodes) {
			if (!bkey_packed(k)) {
				struct bkey_i *u = packed_to_bkey(k);

				swap(u->k.p.inode, u->k.p.offset);
			} else if (f->bits_per_field[BKEY_FIELD_INODE] &&
				   f->bits_per_field[BKEY_FIELD_OFFSET]) {
				struct bkey_format tmp = *f, *in = f, *out = &tmp;

				swap(tmp.bits_per_field[BKEY_FIELD_INODE],
				     tmp.bits_per_field[BKEY_FIELD_OFFSET]);
				swap(tmp.field_offset[BKEY_FIELD_INODE],
				     tmp.field_offset[BKEY_FIELD_OFFSET]);

				if (!write)
					swap(in, out);

				uk = __bch2_bkey_unpack_key(in, k);
				swap(uk.p.inode, uk.p.offset);
				BUG_ON(!bch2_bkey_pack_key(k, &uk, out));
			}
		}
		break;
	case 3:
		if (version < bcachefs_metadata_version_snapshot &&
		    (level || btree_type_has_snapshots(btree_id))) {
			struct bkey_i *u = packed_to_bkey(k);

			if (u) {
				u->k.p.snapshot = write
					? 0 : U32_MAX;
			} else {
				u64 min_packed = f->field_offset[BKEY_FIELD_SNAPSHOT];
				u64 max_packed = min_packed +
					~(~0ULL << f->bits_per_field[BKEY_FIELD_SNAPSHOT]);

				uk = __bch2_bkey_unpack_key(f, k);
				uk.p.snapshot = write
					? min_packed : min_t(u64, U32_MAX, max_packed);

				BUG_ON(!bch2_bkey_pack_key(k, &uk, f));
			}
		}

		break;
	case 4:
		if (!bkey_packed(k)) {
			u = bkey_i_to_s(packed_to_bkey(k));
		} else {
			uk = __bch2_bkey_unpack_key(f, k);
			u.k = &uk;
			u.v = bkeyp_val(f, k);
		}

		if (big_endian != CPU_BIG_ENDIAN)
			bch2_bkey_swab_val(u);

		ops = &bch2_bkey_ops[k->type];

		if (ops->compat)
			ops->compat(btree_id, version, big_endian, write, u);
		break;
	default:
		BUG();
	}
}