/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "bset.h"
#include "btree_types.h"

static inline void __btree_path_get(struct btree_path *path, bool intent)
{
	path->ref++;
	path->intent_ref += intent;
}

static inline bool __btree_path_put(struct btree_path *path, bool intent)
{
	EBUG_ON(!path->ref);
	EBUG_ON(!path->intent_ref && intent);

	path->intent_ref -= intent;
	return --path->ref == 0;
}
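
/*
 * Illustrative sketch (not from the original source): gets and puts must be
 * balanced, and a get with @intent set must be matched by an intent put:
 *
 *	__btree_path_get(path, true);
 *	...
 *	if (__btree_path_put(path, true))
 *		;	// last ref dropped: caller may free/recycle the path
 */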

static inline void btree_path_set_dirty(struct btree_path *path,
					enum btree_path_uptodate u)
{
	path->uptodate = max_t(unsigned, path->uptodate, u);
}

static inline struct btree *btree_path_node(struct btree_path *path,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
}

static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
					       const struct btree *b, unsigned level)
{
	/*
	 * We don't compare the low bits of the lock sequence numbers because
	 * @path might have taken a write lock on @b, and we don't want to
	 * skip the linked path if the sequence numbers were equal before
	 * taking that write lock. The lock sequence number is incremented by
	 * taking and releasing write locks and is even when unlocked:
	 */
	return path->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
}
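
/*
 * Illustrative example (not from the original source): if a node's lock_seq
 * is 4 (even, i.e. unlocked), taking the write lock bumps it to 5 and
 * releasing it bumps it to 6.  A path that recorded seq 4 and then took the
 * write lock itself still matches (4 >> 1 == 5 >> 1), but once the write
 * lock is released other paths see a mismatch (4 >> 1 != 6 >> 1).
 */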

static inline struct btree *btree_node_parent(struct btree_path *path,
					      struct btree *b)
{
	return btree_path_node(path, b->c.level + 1);
}

static inline int btree_iter_err(const struct btree_iter *iter)
{
	return iter->flags & BTREE_ITER_ERROR ? -EIO : 0;
}

/* Iterate over paths within a transaction: */

void __bch2_btree_trans_sort_paths(struct btree_trans *);

static inline void btree_trans_sort_paths(struct btree_trans *trans)
{
	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    trans->paths_sorted)
		return;
	__bch2_btree_trans_sort_paths(trans);
}

static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned idx)
{
	u64 l;

	if (idx == BTREE_ITER_MAX)
		return NULL;

	l = trans->paths_allocated >> idx;
	if (!l)
		return NULL;

	idx += __ffs64(l);
	EBUG_ON(idx >= BTREE_ITER_MAX);
	EBUG_ON(trans->paths[idx].idx != idx);
	return &trans->paths[idx];
}

2021-08-30 22:18:31 +03:00
# define trans_for_each_path(_trans, _path) \
for ( _path = __trans_next_path ( ( _trans ) , 0 ) ; \
( _path ) ; \
_path = __trans_next_path ( ( _trans ) , ( _path ) - > idx + 1 ) )
2019-03-28 05:03:30 +03:00
2021-08-30 22:18:31 +03:00
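
/*
 * Illustrative sketch (not from the original source): count how many paths
 * in @trans currently hold intent references:
 *
 *	struct btree_path *path;
 *	unsigned nr_intent = 0;
 *
 *	trans_for_each_path(trans, path)
 *		nr_intent += path->intent_ref != 0;
 */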

static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx + 1 : 0;

	EBUG_ON(idx > trans->nr_sorted);

	return idx < trans->nr_sorted
		? trans->paths + trans->sorted[idx]
		: NULL;
}

static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx : trans->nr_sorted;

	return idx
		? trans->paths + trans->sorted[idx - 1]
		: NULL;
}

#define trans_for_each_path_inorder(_trans, _path, _i)			\
	for (_i = 0;							\
	     ((_path) = (_trans)->paths + (_trans)->sorted[_i]),	\
	     (_i) < (_trans)->nr_sorted;				\
	     _i++)

#define trans_for_each_path_inorder_reverse(_trans, _path, _i)		\
	for (_i = (_trans)->nr_sorted - 1;				\
	     ((_path) = (_trans)->paths + (_trans)->sorted[_i]),	\
	     (_i) >= 0;							\
	     --_i)
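
/*
 * Illustrative sketch (not from the original source): visit paths in key
 * order; '_i' is the index variable the macro requires:
 *
 *	struct btree_path *path;
 *	unsigned i;
 *
 *	trans_for_each_path_inorder(trans, path, i)
 *		pr_info("path %u: btree %u level %u\n",
 *			path->idx, path->btree_id, path->level);
 */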

static inline bool __path_has_node(const struct btree_path *path,
				   const struct btree *b)
{
	return path->l[b->c.level].b == b &&
		btree_node_lock_seq_matches(path, b, b->c.level);
}

static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned idx)
{
	struct btree_path *path = __trans_next_path(trans, idx);

	while (path && !__path_has_node(path, b))
		path = __trans_next_path(trans, path->idx + 1);

	return path;
}

#define trans_for_each_path_with_node(_trans, _b, _path)		\
	for (_path = __trans_next_path_with_node((_trans), (_b), 0);	\
	     (_path);							\
	     _path = __trans_next_path_with_node((_trans), (_b),	\
						 (_path)->idx + 1))

struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *,
					      struct btree_path *, bool);

static inline struct btree_path * __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
			 struct btree_path *path, bool intent)
{
	if (path->ref > 1 || path->preserve)
		path = __bch2_btree_path_make_mut(trans, path, intent);
	return path;
}

int __must_check bch2_btree_path_traverse(struct btree_trans *,
					  struct btree_path *, unsigned);
struct btree_path *bch2_path_get(struct btree_trans *, bool, enum btree_id,
				 struct bpos, unsigned, unsigned, bool);
inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
void bch2_trans_verify_locks(struct btree_trans *);
#else
static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
static inline void bch2_trans_verify_locks(struct btree_trans *trans) {}
#endif

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *, struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
			      struct btree *, struct btree_node_iter *,
			      struct bkey_packed *, unsigned, unsigned);

bool bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);

void bch2_path_put(struct btree_trans *, struct btree_path *, bool);

bool bch2_trans_relock(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);

__always_inline
static inline int btree_trans_restart(struct btree_trans *trans)
{
	trans->restarted = true;
	bch2_trans_unlock(trans);
	return -EINTR;
}
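
/*
 * Illustrative sketch (not from the original source) of the retry pattern
 * callers use when -EINTR propagates up; do_stuff() is hypothetical:
 *
 *	int ret;
 *	do {
 *		bch2_trans_begin(&trans);
 *		ret = do_stuff(&trans);
 *	} while (ret == -EINTR);
 */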

bool bch2_btree_node_upgrade(struct btree_trans *,
			     struct btree_path *, unsigned);

bool __bch2_btree_path_upgrade(struct btree_trans *,
			       struct btree_path *, unsigned);

static inline bool bch2_btree_path_upgrade(struct btree_trans *trans,
					   struct btree_path *path,
					   unsigned new_locks_want)
{
	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

	return path->locks_want < new_locks_want
		? __bch2_btree_path_upgrade(trans, path, new_locks_want)
		: path->uptodate == BTREE_ITER_UPTODATE;
}

void __bch2_btree_path_downgrade(struct btree_path *, unsigned);

static inline void bch2_btree_path_downgrade(struct btree_path *path)
{
	unsigned new_locks_want = path->level + !!path->intent_ref;

	if (path->locks_want > new_locks_want)
		__bch2_btree_path_downgrade(path, new_locks_want);
}

void bch2_trans_downgrade(struct btree_trans *);

void bch2_trans_node_add(struct btree_trans *trans, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);

int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);

bool bch2_btree_iter_advance(struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_iter *);

/*
 * When BTREE_ITER_ALL_SNAPSHOTS is not set, iter->pos.snapshot should always
 * equal iter->snapshot: 0 for btrees that don't use snapshots, U32_MAX for
 * btrees that will (until snapshot creation is enabled).
 */
static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
		new_pos.snapshot = iter->snapshot;

	iter->k.type		= KEY_TYPE_deleted;
	iter->k.p.inode		= iter->pos.inode	= new_pos.inode;
	iter->k.p.offset	= iter->pos.offset	= new_pos.offset;
	iter->k.p.snapshot	= iter->pos.snapshot	= new_pos.snapshot;
	iter->k.size		= 0;
}

static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
	BUG_ON(!(iter->flags & BTREE_ITER_IS_EXTENTS));
	iter->pos = bkey_start_pos(&iter->k);
}

static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
{
	struct bpos pos = iter->pos;

	iter->snapshot = snapshot;
	pos.snapshot = snapshot;
	bch2_btree_iter_set_pos(iter, pos);
}

void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);
void bch2_trans_iter_init(struct btree_trans *, struct btree_iter *,
			  unsigned, struct bpos, unsigned);
void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
			       enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);

static inline void set_btree_iter_dontneed(struct btree_iter *iter)
{
	iter->path->preserve = false;
}

void *bch2_trans_kmalloc(struct btree_trans *, size_t);

void bch2_trans_begin(struct btree_trans *);

static inline struct btree *
__btree_iter_peek_node_and_restart(struct btree_trans *trans, struct btree_iter *iter)
{
	struct btree *b;

	while (b = bch2_btree_iter_peek_node(iter),
	       PTR_ERR_OR_ZERO(b) == -EINTR)
		bch2_trans_begin(trans);

	return b;
}

#define __for_each_btree_node(_trans, _iter, _btree_id, _start,	\
			      _locks_want, _depth, _flags, _b, _ret)	\
	for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id),	\
				       _start, _locks_want, _depth, _flags);\
	     (_b) = __btree_iter_peek_node_and_restart((_trans), &(_iter)),\
	     !((_ret) = PTR_ERR_OR_ZERO(_b)) && (_b);			\
	     (_b) = bch2_btree_iter_next_node(&(_iter)))

#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b, _ret)				\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      0, 0, _flags, _b, _ret)
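
/*
 * Illustrative sketch (not from the original source; the btree id is an
 * arbitrary choice): walk the nodes of the extents btree; the iterator must
 * be exited afterwards:
 *
 *	struct btree_iter iter;
 *	struct btree *b;
 *	int ret;
 *
 *	for_each_btree_node(&trans, iter, BTREE_ID_extents, POS_MIN,
 *			    0, b, ret) {
 *		...
 *	}
 *	bch2_trans_iter_exit(&trans, &iter);
 */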

static inline int bkey_err(struct bkey_s_c k)
{
	return PTR_ERR_OR_ZERO(k.k);
}

static inline struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter,
						     unsigned flags)
{
	return flags & BTREE_ITER_SLOTS
		? bch2_btree_iter_peek_slot(iter)
		: bch2_btree_iter_peek(iter);
}

static inline struct bkey_s_c
__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
				   struct btree_iter *iter, unsigned flags)
{
	struct bkey_s_c k;

	while ((hweight64(trans->paths_allocated) > BTREE_ITER_MAX / 2) ||
	       (k = __bch2_btree_iter_peek(iter, flags),
		bkey_err(k) == -EINTR))
		bch2_trans_begin(trans);

	return k;
}

#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _ret)			\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))
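
/*
 * Illustrative sketch (not from the original source; the btree id and loop
 * body are arbitrary choices): iterate every key in a btree:
 *
 *	struct btree_trans trans;
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *
 *	for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN, 0, k, ret)
 *		pr_info("key at %llu:%llu\n", k.k->p.inode, k.k->p.offset);
 *	bch2_trans_iter_exit(&trans, &iter);
 *
 *	bch2_trans_exit(&trans);
 */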

#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
			   _start, _flags, _k, _ret)			\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = __bch2_btree_iter_peek(&(_iter), _flags),		\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _ret)	\
	for (;								\
	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)	\
	for (;								\
	     (_k) = __bch2_btree_iter_peek(&(_iter), _flags),		\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))
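
/*
 * Illustrative note (not from the original source): the _continue variants
 * resume from wherever @_iter already points instead of initializing a new
 * iterator, e.g. after repositioning (some_pos is hypothetical):
 *
 *	bch2_btree_iter_set_pos(&iter, some_pos);
 *	for_each_btree_key_continue(&trans, iter, 0, k, ret) {
 *		...
 *	}
 */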

/* new multiple iterator interface: */

void bch2_dump_trans_paths_updates(struct btree_trans *);

void bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned, size_t);
void bch2_trans_exit(struct btree_trans *);

void bch2_btree_trans_to_text(struct printbuf *, struct bch_fs *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */