2017-03-17 09:18:50 +03:00
/* SPDX-License-Identifier: GPL-2.0 */
# ifndef _BCACHEFS_BTREE_ITER_H
# define _BCACHEFS_BTREE_ITER_H
2019-01-14 00:02:22 +03:00
# include "bset.h"
2017-03-17 09:18:50 +03:00
# include "btree_types.h"
2022-07-06 00:27:44 +03:00
# include "trace.h"
2017-03-17 09:18:50 +03:00
2023-04-30 20:02:05 +03:00
static inline int __bkey_err ( const struct bkey * k )
{
return PTR_ERR_OR_ZERO ( k ) ;
}
# define bkey_err(_k) __bkey_err((_k).k)
2021-08-30 22:18:31 +03:00
/* Take a reference on @path; when @intent is set, also count an intent ref. */
static inline void __btree_path_get(struct btree_path *path, bool intent)
{
	path->ref++;
	if (intent)
		path->intent_ref++;
}
2021-08-30 22:18:31 +03:00
/*
 * Drop a reference on @path (and an intent ref when @intent is set);
 * returns true when the last reference was dropped.
 */
static inline bool __btree_path_put(struct btree_path *path, bool intent)
{
	EBUG_ON(!path->ref);
	EBUG_ON(!path->intent_ref && intent);

	if (intent)
		path->intent_ref--;

	path->ref--;
	return path->ref == 0;
}
static inline void btree_path_set_dirty ( struct btree_path * path ,
enum btree_path_uptodate u )
{
path - > uptodate = max_t ( unsigned , path - > uptodate , u ) ;
}
static inline struct btree * btree_path_node ( struct btree_path * path ,
2017-03-17 09:18:50 +03:00
unsigned level )
{
2021-08-30 22:18:31 +03:00
return level < BTREE_MAX_DEPTH ? path - > l [ level ] . b : NULL ;
2017-03-17 09:18:50 +03:00
}
2021-08-30 22:18:31 +03:00
/*
 * Does the lock sequence number @path recorded for @level still match @b's
 * current lock sequence?  A mismatch means the node was relocked/modified
 * since the path last held it.
 */
static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
					const struct btree *b, unsigned level)
{
	return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
}

/* Parent node of @b in @path (one level up), or NULL if @b is the root. */
static inline struct btree *btree_node_parent(struct btree_path *path,
					      struct btree *b)
{
	return btree_path_node(path, b->c.level + 1);
}
2021-08-30 22:18:31 +03:00
/* Iterate over paths within a transaction: */
2019-03-28 05:03:30 +03:00
2021-09-04 00:18:57 +03:00
void __bch2_btree_trans_sort_paths ( struct btree_trans * ) ;
static inline void btree_trans_sort_paths ( struct btree_trans * trans )
{
if ( ! IS_ENABLED ( CONFIG_BCACHEFS_DEBUG ) & &
trans - > paths_sorted )
return ;
__bch2_btree_trans_sort_paths ( trans ) ;
}
2023-12-13 04:08:29 +03:00
/*
 * The paths array lives inside a struct btree_trans_paths allocation;
 * recover a pointer to its nr_paths field from a bare paths pointer.
 */
static inline unsigned long *trans_paths_nr(struct btree_path *paths)
{
	return &container_of(paths, struct btree_trans_paths, paths[0])->nr_paths;
}

/*
 * The allocated-paths bitmap sits immediately before the btree_trans_paths
 * header: step back BITS_TO_LONGS(nr_paths) words from nr_paths.
 */
static inline unsigned long *trans_paths_allocated(struct btree_path *paths)
{
	unsigned long *v = trans_paths_nr(paths);

	return v - BITS_TO_LONGS(*v);
}

/* Walk set bits (allocated path indices) in @_paths_allocated from @_start: */
#define trans_for_each_path_idx_from(_paths_allocated, _nr, _idx, _start)\
	for (_idx = _start;						\
	     (_idx = find_next_bit(_paths_allocated, _nr, _idx)) < _nr;	\
	     _idx++)
2023-05-28 02:55:54 +03:00
/*
 * Return the next allocated path at index >= *idx, advancing *idx to that
 * path's index; NULL when there are no more allocated paths.
 */
static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned *idx)
{
	unsigned long *w = trans->paths_allocated + *idx / BITS_PER_LONG;
	/*
	 * Open coded find_next_bit(), because
	 *  - this is fast path, we can't afford the function call
	 *  - and we know that nr_paths is a multiple of BITS_PER_LONG,
	 */
	while (*idx < trans->nr_paths) {
		/* shift out bits below *idx within the current word */
		unsigned long v = *w >> (*idx & (BITS_PER_LONG - 1));
		if (v) {
			/* lowest set bit is the next allocated index */
			*idx += __ffs(v);
			return trans->paths + *idx;
		}

		/* nothing set here: advance to the next word boundary */
		*idx += BITS_PER_LONG;
		*idx &= ~(BITS_PER_LONG - 1);
		w++;
	}

	return NULL;
}
/*
 * This version is intended to be safe for use on a btree_trans that is owned by
 * another thread, for bch2_btree_trans_to_text();
 */
#define trans_for_each_path_from(_trans, _path, _idx, _start)		\
	for (_idx = _start;						\
	     (_path = __trans_next_path((_trans), &_idx));		\
	     _idx++)

/*
 * Iterate over all allocated paths; starts at index 1 (index 0 appears to be
 * reserved as a "no path" sentinel — iterators store 0 for unset paths).
 */
#define trans_for_each_path(_trans, _path, _idx)			\
	trans_for_each_path_from(_trans, _path, _idx, 1)
2023-12-11 07:37:45 +03:00
2021-08-30 22:18:31 +03:00
static inline struct btree_path * next_btree_path ( struct btree_trans * trans , struct btree_path * path )
2021-06-12 22:45:45 +03:00
{
2021-08-30 22:18:31 +03:00
unsigned idx = path ? path - > sorted_idx + 1 : 0 ;
2021-06-12 22:45:45 +03:00
EBUG_ON ( idx > trans - > nr_sorted ) ;
return idx < trans - > nr_sorted
2021-08-30 22:18:31 +03:00
? trans - > paths + trans - > sorted [ idx ]
2021-06-12 22:45:45 +03:00
: NULL ;
}
2021-08-30 22:18:31 +03:00
static inline struct btree_path * prev_btree_path ( struct btree_trans * trans , struct btree_path * path )
2021-06-12 22:45:45 +03:00
{
2021-08-30 22:18:31 +03:00
unsigned idx = path ? path - > sorted_idx : trans - > nr_sorted ;
2021-06-12 22:45:45 +03:00
return idx
2021-08-30 22:18:31 +03:00
? trans - > paths + trans - > sorted [ idx - 1 ]
2021-06-12 22:45:45 +03:00
: NULL ;
}
2023-12-11 07:29:06 +03:00
# define trans_for_each_path_idx_inorder(_trans, _iter) \
for ( _iter = ( struct trans_for_each_path_inorder_iter ) { 0 } ; \
( _iter . path_idx = trans - > sorted [ _iter . sorted_idx ] , \
_iter . sorted_idx < ( _trans ) - > nr_sorted ) ; \
_iter . sorted_idx + + )
2023-12-11 00:35:45 +03:00
struct trans_for_each_path_inorder_iter {
btree_path_idx_t sorted_idx ;
btree_path_idx_t path_idx ;
} ;
# define trans_for_each_path_inorder(_trans, _path, _iter) \
for ( _iter = ( struct trans_for_each_path_inorder_iter ) { 0 } ; \
( _iter . path_idx = trans - > sorted [ _iter . sorted_idx ] , \
_path = ( _trans ) - > paths + _iter . path_idx , \
_iter . sorted_idx < ( _trans ) - > nr_sorted ) ; \
_iter . sorted_idx + + )
2021-06-12 22:45:45 +03:00
2021-08-30 22:18:31 +03:00
# define trans_for_each_path_inorder_reverse(_trans, _path, _i) \
2021-06-12 22:45:45 +03:00
for ( _i = trans - > nr_sorted - 1 ; \
2021-08-30 22:18:31 +03:00
( ( _path ) = ( _trans ) - > paths + trans - > sorted [ _i ] ) , ( _i ) > = 0 ; \
2021-06-12 22:45:45 +03:00
- - _i )
2021-08-30 22:18:31 +03:00
static inline bool __path_has_node ( const struct btree_path * path ,
2017-03-17 09:18:50 +03:00
const struct btree * b )
{
2021-08-30 22:18:31 +03:00
return path - > l [ b - > c . level ] . b = = b & &
btree_node_lock_seq_matches ( path , b , b - > c . level ) ;
2017-03-17 09:18:50 +03:00
}
2021-08-30 22:18:31 +03:00
/*
 * Next allocated path (at index >= *idx) that has @b linked in, advancing
 * *idx; NULL when there are no more.
 */
static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned *idx)
{
	struct btree_path *path;

	for (path = __trans_next_path(trans, idx);
	     path && !__path_has_node(path, b);
	     path = __trans_next_path(trans, idx))
		(*idx)++;

	return path;
}

#define trans_for_each_path_with_node(_trans, _b, _path, _iter)		\
	for (_iter = 1;							\
	     (_path = __trans_next_path_with_node((_trans), (_b), &_iter));\
	     _iter++)
2021-08-30 22:18:31 +03:00
2023-12-08 10:24:05 +03:00
btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *, btree_path_idx_t,
					bool, unsigned long);

/*
 * Get a path the caller is allowed to modify: if @path is shared (ref > 1) or
 * marked preserve, take/clone via the slow path and return the (possibly new)
 * index.  should_be_locked is cleared on the result, since the caller intends
 * to move it — note this must happen after the possible reallocation above.
 */
static inline btree_path_idx_t __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
			 btree_path_idx_t path, bool intent,
			 unsigned long ip)
{
	if (trans->paths[path].ref > 1 ||
	    trans->paths[path].preserve)
		path = __bch2_btree_path_make_mut(trans, path, intent, ip);
	trans->paths[path].should_be_locked = false;
	return path;
}
2023-12-08 10:10:23 +03:00
btree_path_idx_t __must_check
__bch2_btree_path_set_pos ( struct btree_trans * , btree_path_idx_t ,
struct bpos , bool , unsigned long ) ;
2022-02-07 06:21:44 +03:00
2023-12-08 10:10:23 +03:00
static inline btree_path_idx_t __must_check
2022-02-07 06:21:44 +03:00
bch2_btree_path_set_pos ( struct btree_trans * trans ,
2023-12-08 10:10:23 +03:00
btree_path_idx_t path , struct bpos new_pos ,
bool intent , unsigned long ip )
2022-02-07 06:21:44 +03:00
{
2023-12-08 10:10:23 +03:00
return ! bpos_eq ( new_pos , trans - > paths [ path ] . pos )
2023-12-08 09:51:04 +03:00
? __bch2_btree_path_set_pos ( trans , path , new_pos , intent , ip )
2022-02-07 06:21:44 +03:00
: path ;
}
2023-12-08 11:02:43 +03:00
int __must_check bch2_btree_path_traverse_one(struct btree_trans *,
					      btree_path_idx_t,
					      unsigned, unsigned long);

/*
 * Ensure @path is traversed and its nodes locked; fast-path return when the
 * path is already fully uptodate (uptodate below BTREE_ITER_NEED_RELOCK).
 */
static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
					  btree_path_idx_t path, unsigned flags)
{
	if (trans->paths[path].uptodate < BTREE_ITER_NEED_RELOCK)
		return 0;

	return bch2_btree_path_traverse_one(trans, path, flags, _RET_IP_);
}
2023-12-08 10:00:43 +03:00
btree_path_idx_t bch2_path_get ( struct btree_trans * , enum btree_id , struct bpos ,
2023-12-04 08:39:38 +03:00
unsigned , unsigned , unsigned , unsigned long ) ;
2022-11-14 04:01:42 +03:00
struct bkey_s_c bch2_btree_path_peek_slot ( struct btree_path * , struct bkey * ) ;
2017-03-17 09:18:50 +03:00
2023-08-05 19:55:08 +03:00
/*
 * bch2_btree_path_peek_slot() for a cached iterator might return a key in a
 * different snapshot:
 */
static inline struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
{
	struct bkey_s_c k = bch2_btree_path_peek_slot(path, u);

	if (k.k && bpos_eq(path->pos, k.k->p))
		return k;

	/* No key at exactly path->pos: synthesize an empty key there */
	bkey_init(u);
	u->p = path->pos;
	return (struct bkey_s_c) { u, NULL };
}
2022-05-21 20:10:39 +03:00
struct bkey_i * bch2_btree_journal_peek_slot ( struct btree_trans * ,
struct btree_iter * , struct bpos ) ;
2022-11-14 04:01:42 +03:00
void bch2_btree_path_level_init ( struct btree_trans * , struct btree_path * , struct btree * ) ;
2022-09-16 21:42:38 +03:00
2023-02-18 06:43:47 +03:00
int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);

/*
 * Acquire @lock; fast path is a trylock, contended case goes through the
 * outlined slow path (presumably to cooperate with btree locking — see
 * __bch2_trans_mutex_lock()).
 */
static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
{
	if (mutex_trylock(lock))
		return 0;

	return __bch2_trans_mutex_lock(trans, lock);
}
2017-03-17 09:18:50 +03:00
#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
void bch2_assert_pos_locked(struct btree_trans *, enum btree_id,
			    struct bpos, bool);
#else
/* Debug-only consistency checks; compile to nothing in release builds: */
static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
					  struct bpos pos, bool key_cache) {}
#endif
2021-08-30 22:18:31 +03:00
void bch2_btree_path_fix_key_modified ( struct btree_trans * trans ,
2021-08-25 04:30:06 +03:00
struct btree * , struct bkey_packed * ) ;
2021-08-30 22:18:31 +03:00
void bch2_btree_node_iter_fix ( struct btree_trans * trans , struct btree_path * ,
2021-08-25 04:30:06 +03:00
struct btree * , struct btree_node_iter * ,
struct bkey_packed * , unsigned , unsigned ) ;
2017-03-17 09:18:50 +03:00
2022-07-18 06:06:38 +03:00
int bch2_btree_path_relock_intent ( struct btree_trans * , struct btree_path * ) ;
2021-08-30 22:18:31 +03:00
2023-12-11 07:18:52 +03:00
void bch2_path_put ( struct btree_trans * , btree_path_idx_t , bool ) ;
2021-07-14 22:13:27 +03:00
2022-07-18 06:06:38 +03:00
int bch2_trans_relock ( struct btree_trans * ) ;
2023-01-24 08:26:48 +03:00
int bch2_trans_relock_notrace ( struct btree_trans * ) ;
2019-05-15 16:47:40 +03:00
void bch2_trans_unlock ( struct btree_trans * ) ;
2023-10-30 19:30:52 +03:00
void bch2_trans_unlock_long ( struct btree_trans * ) ;
2022-10-03 23:39:49 +03:00
bool bch2_trans_locked ( struct btree_trans * ) ;
2017-03-17 09:18:50 +03:00
2023-09-10 23:24:02 +03:00
/*
 * Returns -BCH_ERR_transaction_restart_nested if the transaction's restart
 * count has changed since @restart_count was sampled, 0 otherwise.
 */
static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
	if (restart_count != trans->restart_count)
		return -BCH_ERR_transaction_restart_nested;

	return 0;
}
2023-02-02 00:15:51 +03:00
void __noreturn bch2_trans_restart_error(struct btree_trans *, u32);

/* Assert the transaction wasn't restarted since @restart_count was sampled. */
static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans,
						   u32 restart_count)
{
	if (trans_was_restarted(trans, restart_count))
		bch2_trans_restart_error(trans, restart_count);
}

void __noreturn bch2_trans_in_restart_error(struct btree_trans *);

/* Assert the transaction doesn't currently have a pending restart. */
static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
{
	if (trans->restarted)
		bch2_trans_in_restart_error(trans);
}
2022-07-18 02:35:38 +03:00
2021-07-26 00:19:52 +03:00
/*
 * Flag the transaction as restarted with @err (a positive
 * BCH_ERR_transaction_restart_* code), recording the caller's IP; returns the
 * negated error for the caller to propagate.  Does not unlock anything.
 */
__always_inline
static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
	BUG_ON(err <= 0);
	BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));

	trans->restarted = err;
	trans->last_restarted_ip = _THIS_IP_;
	return -err;
}
__always_inline
2023-07-07 05:47:42 +03:00
static int btree_trans_restart ( struct btree_trans * trans , int err )
2022-07-18 06:06:38 +03:00
{
btree_trans_restart_nounlock ( trans , err ) ;
return - err ;
2021-07-26 00:19:52 +03:00
}
2021-11-03 19:08:02 +03:00
bool bch2_btree_node_upgrade(struct btree_trans *,
			     struct btree_path *, unsigned);

void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);

/*
 * Drop intent locks the path no longer needs: the wanted level is the path's
 * own level, plus one extra while an intent ref is still held.
 */
static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
					     struct btree_path *path)
{
	unsigned new_locks_want = path->level + !!path->intent_ref;

	if (path->locks_want > new_locks_want)
		__bch2_btree_path_downgrade(trans, path, new_locks_want);
}

void bch2_trans_downgrade(struct btree_trans *);

void bch2_trans_node_add(struct btree_trans *trans, struct btree_path *, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);
2017-03-17 09:18:50 +03:00
2021-09-07 22:34:16 +03:00
int __must_check __bch2_btree_iter_traverse ( struct btree_iter * iter ) ;
2021-03-24 04:22:50 +03:00
int __must_check bch2_btree_iter_traverse ( struct btree_iter * ) ;
2019-09-08 21:00:12 +03:00
2017-03-17 09:18:50 +03:00
struct btree * bch2_btree_iter_peek_node ( struct btree_iter * ) ;
2023-03-06 12:01:22 +03:00
struct btree * bch2_btree_iter_peek_node_and_restart ( struct btree_iter * ) ;
2020-02-19 00:17:55 +03:00
struct btree * bch2_btree_iter_next_node ( struct btree_iter * ) ;
2017-03-17 09:18:50 +03:00
2022-03-11 20:31:52 +03:00
struct bkey_s_c bch2_btree_iter_peek_upto ( struct btree_iter * , struct bpos ) ;
2017-03-17 09:18:50 +03:00
struct bkey_s_c bch2_btree_iter_next ( struct btree_iter * ) ;
2019-09-08 00:17:21 +03:00
2022-03-11 20:31:52 +03:00
/* Peek forwards with no upper bound. */
static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	return bch2_btree_iter_peek_upto(iter, SPOS_MAX);
}

struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);

bool bch2_btree_iter_advance(struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_iter *);

/*
 * Set the iterator's position, resetting iter->k to an empty deleted key at
 * the new position (pos and k.p are kept in sync field by field).
 */
static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	iter->k.type = KEY_TYPE_deleted;
	iter->k.p.inode		= iter->pos.inode	= new_pos.inode;
	iter->k.p.offset	= iter->pos.offset	= new_pos.offset;
	iter->k.p.snapshot	= iter->pos.snapshot	= new_pos.snapshot;
	iter->k.size = 0;
}

/*
 * Reposition the iterator, dropping any update_path left over from a previous
 * peek.  Unless the iterator spans all snapshots, the new position's snapshot
 * field is forced to the iterator's snapshot.
 */
static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	struct btree_trans *trans = iter->trans;

	if (unlikely(iter->update_path))
		bch2_path_put(trans, iter->update_path,
			      iter->flags & BTREE_ITER_INTENT);
	iter->update_path = 0;

	if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
		new_pos.snapshot = iter->snapshot;

	__bch2_btree_iter_set_pos(iter, new_pos);
}

/* Rewind an extents iterator's position to the start of its current key. */
static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
	BUG_ON(!(iter->flags & BTREE_ITER_IS_EXTENTS));
	iter->pos = bkey_start_pos(&iter->k);
}

/* Switch the iterator to @snapshot, keeping the same inode:offset. */
static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
{
	struct bpos pos = iter->pos;

	iter->snapshot = snapshot;
	pos.snapshot = snapshot;
	bch2_btree_iter_set_pos(iter, pos);
}
2021-10-21 19:05:21 +03:00
void bch2_trans_iter_exit ( struct btree_trans * , struct btree_iter * ) ;
2022-11-25 08:40:27 +03:00
/*
 * Compute the effective iterator flags for @btree_id: infer extents and
 * snapshot behaviour from the btree type, and read overlaid journal keys when
 * journal replay hasn't finished.
 */
static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
					       unsigned btree_id,
					       unsigned flags)
{
	if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
	    btree_id_is_extents(btree_id))
		flags |= BTREE_ITER_IS_EXTENTS;

	/* ALL_SNAPSHOTS is meaningless on btrees without a snapshot field: */
	if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
	    !btree_type_has_snapshot_field(btree_id))
		flags &= ~BTREE_ITER_ALL_SNAPSHOTS;

	if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	    btree_type_has_snapshots(btree_id))
		flags |= BTREE_ITER_FILTER_SNAPSHOTS;

	if (trans->journal_replay_not_finished)
		flags |= BTREE_ITER_WITH_JOURNAL;

	return flags;
}

/* As above, but first decide whether the key cache applies to this btree. */
static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
					     unsigned btree_id,
					     unsigned flags)
{
	if (!btree_id_cached(trans->c, btree_id)) {
		flags &= ~BTREE_ITER_CACHED;
		flags &= ~BTREE_ITER_WITH_KEY_CACHE;
	} else if (!(flags & BTREE_ITER_CACHED))
		flags |= BTREE_ITER_WITH_KEY_CACHE;

	return __bch2_btree_iter_flags(trans, btree_id, flags);
}
/* Initialize all iterator fields and acquire its btree path. */
static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
					  struct btree_iter *iter,
					  unsigned btree_id, struct bpos pos,
					  unsigned locks_want,
					  unsigned depth,
					  unsigned flags,
					  unsigned long ip)
{
	iter->trans		= trans;
	iter->update_path	= 0;
	iter->key_cache_path	= 0;
	iter->btree_id		= btree_id;
	iter->min_depth		= 0;
	iter->flags		= flags;
	iter->snapshot		= pos.snapshot;
	iter->pos		= pos;
	iter->k			= POS_KEY(pos);
	iter->journal_idx	= 0;
#ifdef CONFIG_BCACHEFS_DEBUG
	iter->ip_allocated = ip;
#endif
	iter->path = bch2_path_get(trans, btree_id, iter->pos,
				   locks_want, depth, flags, ip);
}

void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
			  enum btree_id, struct bpos, unsigned);

/*
 * Initialize an iterator: when btree_id and flags are compile-time constants
 * the flag computation constant-folds, so inline everything; otherwise call
 * the outlined version to keep code size down.
 */
static inline void bch2_trans_iter_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  unsigned btree_id, struct bpos pos,
			  unsigned flags)
{
	if (__builtin_constant_p(btree_id) &&
	    __builtin_constant_p(flags))
		bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
				bch2_btree_iter_flags(trans, btree_id, flags),
				_THIS_IP_);
	else
		bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
}
2021-10-21 19:05:21 +03:00
void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
			       enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);

/*
 * Hint that this iterator's position needn't be preserved across operations.
 * Skipped while the transaction is restarted — presumably because the path
 * isn't valid mid-restart; TODO confirm.
 */
static inline void set_btree_iter_dontneed(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;

	if (!trans->restarted)
		btree_iter_path(trans, iter)->preserve = false;
}
2022-09-26 23:15:17 +03:00
void *__bch2_trans_kmalloc(struct btree_trans *, size_t);

/*
 * Allocate zeroed memory from the transaction's bump allocator; falls back to
 * the outlined slow path when the preallocated buffer is exhausted.  Returned
 * memory lives until the transaction is reset.
 *
 * Alignment: sizes are rounded up to 8 bytes.  Use round_up() (power-of-two
 * fast path) for consistency with bch2_trans_kmalloc_nomemzero() — the
 * previous roundup() computed the same value with a division.
 */
static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size = round_up(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		memset(p, 0, size);
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}
/* Like bch2_trans_kmalloc(), but the returned memory is not zeroed. */
static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
{
	void *p;

	size = round_up(size, 8);

	if (unlikely(trans->mem_top + size > trans->mem_bytes))
		return __bch2_trans_kmalloc(trans, size);

	p = trans->mem + trans->mem_top;
	trans->mem_top += size;
	return p;
}
2023-04-30 02:33:09 +03:00
/*
 * Init @iter at @pos and peek the slot there; when @type is nonzero and the
 * key's type doesn't match, the result is -BCH_ERR_ENOENT_bkey_type_mismatch.
 * On error the iterator is exited; on success the caller owns it and must
 * call bch2_trans_iter_exit().
 */
static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned btree_id, struct bpos pos,
				unsigned flags, unsigned type)
{
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
	k = bch2_btree_iter_peek_slot(iter);

	if (!bkey_err(k) && type && k.k->type != type)
		k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);

	if (unlikely(bkey_err(k)))
		bch2_trans_iter_exit(trans, iter);
	return k;
}

/* Untyped variant: no key type check. */
static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned btree_id, struct bpos pos,
				unsigned flags)
{
	return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0);
}

/* Typed variant: checks for KEY_TYPE_<type>, returns a bkey_s_c_<type>. */
#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
	bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter,		\
				       _btree_id, _pos, _flags, KEY_TYPE_##_type))
/*
 * Look up the key at @pos (checking its type) and copy its value into @val,
 * truncating to the smaller of the on-disk value size and @val_size and
 * zero-filling the remainder of the caller's buffer.
 */
static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
				unsigned btree_id, struct bpos pos,
				unsigned flags, unsigned type,
				unsigned val_size, void *val)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type);
	ret = bkey_err(k);
	if (!ret) {
		unsigned b = min_t(unsigned, bkey_val_bytes(k.k), val_size);

		memcpy(val, k.v, b);
		/*
		 * Fix: this previously tested/cleared sizeof(*val) bytes, but
		 * val is a void * — sizeof(*val) is 1 under the GNU extension,
		 * so the tail of the caller's buffer was left uninitialized
		 * when the on-disk value was short.  Use the caller-supplied
		 * val_size (== sizeof(*_val) from the macro below).
		 */
		if (unlikely(b < val_size))
			memset((void *) val + b, 0, val_size - b);
		bch2_trans_iter_exit(trans, &iter);
	}

	return ret;
}

#define bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, _type, _val)\
	__bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags,	\
				  KEY_TYPE_##_type, sizeof(*_val), _val)
2023-10-30 19:30:52 +03:00
void bch2_trans_srcu_unlock(struct btree_trans *);

u32 bch2_trans_begin(struct btree_trans *);

/*
 * XXX
 * this does not handle transaction restarts from bch2_btree_iter_next_node()
 * correctly
 */
#define __for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      _locks_want, _depth, _flags, _b, _ret)	\
	for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id),	\
				_start, _locks_want, _depth, _flags);	\
	     (_b) = bch2_btree_iter_peek_node_and_restart(&(_iter)),	\
	     !((_ret) = PTR_ERR_OR_ZERO(_b)) && (_b);			\
	     (_b) = bch2_btree_iter_next_node(&(_iter)))

/* Walk btree nodes (not keys) at depth 0 with default locking: */
#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b, _ret)				\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      0, 0, _flags, _b, _ret)
2017-03-17 09:18:50 +03:00
2022-07-20 23:13:27 +03:00
/* Backwards peek, selecting slot vs key iteration from BTREE_ITER_SLOTS. */
static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
							     unsigned flags)
{
	if (flags & BTREE_ITER_SLOTS)
		return bch2_btree_iter_peek_slot(iter);

	return bch2_btree_iter_peek_prev(iter);
}
2022-01-09 09:07:29 +03:00
static inline struct bkey_s_c bch2_btree_iter_peek_type ( struct btree_iter * iter ,
2022-03-11 20:31:52 +03:00
unsigned flags )
2017-03-17 09:18:50 +03:00
{
2023-11-13 04:35:51 +03:00
return flags & BTREE_ITER_SLOTS ? bch2_btree_iter_peek_slot ( iter ) :
2022-04-13 01:04:08 +03:00
bch2_btree_iter_peek ( iter ) ;
2017-03-17 09:18:50 +03:00
}
2022-03-11 20:31:52 +03:00
static inline struct bkey_s_c bch2_btree_iter_peek_upto_type ( struct btree_iter * iter ,
struct bpos end ,
unsigned flags )
{
if ( ! ( flags & BTREE_ITER_SLOTS ) )
return bch2_btree_iter_peek_upto ( iter , end ) ;
2022-11-24 11:12:22 +03:00
if ( bkey_gt ( iter - > pos , end ) )
2022-03-11 20:31:52 +03:00
return bkey_s_c_null ;
return bch2_btree_iter_peek_slot ( iter ) ;
}
2023-11-27 01:02:06 +03:00
int __bch2_btree_trans_too_many_iters(struct btree_trans *);

/*
 * When nearly all of the initial path allocation is in use, go through the
 * outlined slow path (which presumably restarts the transaction — see
 * __bch2_btree_trans_too_many_iters()).
 */
static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_INITIAL - 8)
		return __bch2_btree_trans_too_many_iters(trans);

	return 0;
}
2023-12-08 08:10:25 +03:00
/*
 * Run @_do, retrying from a fresh bch2_trans_begin() on transaction restart;
 * evaluates to @_do's final (non-restart) result.
 *
 * goto instead of loop, so that when used inside for_each_btree_key2()
 * break/continue work correctly
 */
#define lockrestart_do(_trans, _do)					\
({									\
	__label__ transaction_restart;					\
	u32 _restart_count;						\
	int _ret2;							\
transaction_restart:							\
	_restart_count = bch2_trans_begin(_trans);			\
	_ret2 = (_do);							\
									\
	if (bch2_err_matches(_ret2, BCH_ERR_transaction_restart))	\
		goto transaction_restart;				\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
	_ret2;								\
})

/*
 * nested_lockrestart_do(), nested_commit_do():
 *
 * These are like lockrestart_do() and commit_do(), with two differences:
 *
 * - We don't call bch2_trans_begin() unless we had a transaction restart
 * - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
 *   transaction restart
 */
#define nested_lockrestart_do(_trans, _do)				\
({									\
	u32 _restart_count, _orig_restart_count;			\
	int _ret2;							\
									\
	_restart_count = _orig_restart_count = (_trans)->restart_count;	\
									\
	while (bch2_err_matches(_ret2 = (_do), BCH_ERR_transaction_restart))\
		_restart_count = bch2_trans_begin(_trans);		\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	_ret2 ?: trans_was_restarted(_trans, _restart_count);		\
})
2023-12-17 05:46:23 +03:00
/*
 * Iterate keys in [@_start, @_end]; @_do runs under lockrestart_do(), so
 * transaction restarts retry the current key.  @_do returning nonzero ends
 * the loop with that error; running out of keys exits via the break inside
 * the statement expression.  Declares @_iter/@_k itself; evaluates to the
 * final error (0 on success).
 */
#define for_each_btree_key_upto(_trans, _iter, _btree_id,		\
				_start, _end, _flags, _k, _do)		\
({									\
	struct btree_iter _iter;					\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_upto_type(&(_iter),	\
						_end, (_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_advance(&(_iter)));		\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

/* As for_each_btree_key_upto(), with no upper bound: */
#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _do)			\
	for_each_btree_key_upto(_trans, _iter, _btree_id, _start,	\
				 SPOS_MAX, _flags, _k, _do)

/* Reverse-order iteration starting from @_start: */
#define for_each_btree_key_reverse(_trans, _iter, _btree_id,		\
				   _start, _flags, _k, _do)		\
({									\
	struct btree_iter _iter;					\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_prev_type(&(_iter),	\
							(_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_rewind(&(_iter)));		\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

/* Iterate, committing the transaction after each successful @_do: */
#define for_each_btree_key_commit(_trans, _iter, _btree_id,		\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

/* Reverse-order variant of for_each_btree_key_commit(): */
#define for_each_btree_key_reverse_commit(_trans, _iter, _btree_id,	\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key_reverse(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

/* Bounded variant of for_each_btree_key_commit(): */
#define for_each_btree_key_upto_commit(_trans, _iter, _btree_id,	\
				  _start, _end, _iter_flags, _k,	\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))
2023-12-17 11:39:03 +03:00
struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined ( struct btree_iter * ) ;
static inline struct bkey_s_c
__bch2_btree_iter_peek_and_restart ( struct btree_trans * trans ,
struct btree_iter * iter , unsigned flags )
{
struct bkey_s_c k ;
while ( btree_trans_too_many_iters ( trans ) | |
( k = bch2_btree_iter_peek_type ( iter , flags ) ,
bch2_err_matches ( bkey_err ( k ) , BCH_ERR_transaction_restart ) ) )
bch2_trans_begin ( trans ) ;
return k ;
}
/* 2023-12-17 05:46:23 +03:00 */
/*
 * Old-style iteration: _iter and _k are declared by the caller, errors are
 * reported through _ret, and transaction restarts are handled internally via
 * __bch2_btree_iter_peek_and_restart(). The caller must still call
 * bch2_trans_iter_exit() when done.
 */
#define for_each_btree_key_old(_trans, _iter, _btree_id,		\
			       _start, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))
/* 2017-03-17 09:18:50 +03:00 */
/* 2022-03-11 20:31:52 +03:00 */
/*
 * Iterate over keys from _start up to _end without handling transaction
 * restarts: any error (including a restart) is stored in _ret and ends the
 * loop. The caller must call bch2_trans_iter_exit() afterwards.
 */
#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id,	\
					  _start, _end, _flags, _k, _ret)\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))
/* 2017-03-17 09:18:50 +03:00 */
/* 2022-10-11 11:32:41 +03:00 */
/*
 * Continue iterating an already-initialized iterator up to _end, without
 * handling transaction restarts (errors end the loop via _ret).
 */
#define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\
	for (;								\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))
/* 2023-12-17 05:55:12 +03:00 */
/*
 * As for_each_btree_key_upto_norestart(), but iterating to the end of the
 * btree (SPOS_MAX).
 */
#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
				     _start, _flags, _k, _ret)		\
	for_each_btree_key_upto_norestart(_trans, _iter, _btree_id,	\
					  _start, SPOS_MAX, _flags, _k, _ret)
/*
 * As for_each_btree_key_upto_continue_norestart(), but iterating to the end
 * of the btree (SPOS_MAX).
 */
#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)	\
	for_each_btree_key_upto_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)
/* 2024-01-16 04:37:23 +03:00 */
/*
* This should not be used in a fastpath , without first trying _do in
* nonblocking mode - it will cause excessive transaction restarts and
* potentially livelocking :
*/
/* 2023-05-29 01:06:27 +03:00 */
/*
 * Evaluate _do with all btree node locks dropped, then relock.
 * Evaluates to _do's error code if nonzero, otherwise to the result of
 * bch2_trans_relock() (which may report a transaction restart).
 */
#define drop_locks_do(_trans, _do)					\
({									\
	bch2_trans_unlock(_trans);					\
	_do ?: bch2_trans_relock(_trans);				\
})
/* 2023-05-28 10:44:38 +03:00 */
/*
 * Run an allocation expression _do (which should allocate with _gfp) first
 * in nonblocking mode; if it fails with ENOMEM, retry with GFP_KERNEL and
 * btree locks dropped. Evaluates to _do's final error code (or the relock
 * error from drop_locks_do()).
 *
 * Fix: the retry previously passed the caller-scope name 'trans' to
 * drop_locks_do() instead of the macro parameter _trans, which only
 * compiled when the caller happened to name its transaction 'trans'.
 */
#define allocate_dropping_locks_errcode(_trans, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	int _ret = _do;							\
									\
	if (bch2_err_matches(_ret, ENOMEM)) {				\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, _do);			\
	}								\
	_ret;								\
})
/*
 * Nonblocking-then-blocking allocation for pointer-returning allocators:
 * _do (which should allocate with _gfp) is tried with GFP_NOWAIT first; on
 * failure it is retried with GFP_KERNEL and btree locks dropped. Evaluates
 * to the (possibly NULL) pointer; _ret is set to the relock error, if any.
 *
 * Fix: the retry previously passed the caller-scope name 'trans' to
 * drop_locks_do() instead of the macro parameter _trans, which only
 * compiled when the caller happened to name its transaction 'trans'.
 */
#define allocate_dropping_locks(_trans, _ret, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	typeof(_do) _p = _do;						\
									\
	_ret = 0;							\
	if (unlikely(!_p)) {						\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, ((_p = _do), 0));		\
	}								\
	_p;								\
})
/* 2022-03-12 02:38:24 +03:00 */
void bch2_trans_updates_to_text ( struct printbuf * , struct btree_trans * ) ;
/* 2022-08-12 03:14:54 +03:00 */
void bch2_trans_paths_to_text ( struct printbuf * , struct btree_trans * ) ;
/* 2022-03-03 06:18:56 +03:00 */
void bch2_dump_trans_updates ( struct btree_trans * ) ;
/* 2021-10-21 19:05:21 +03:00 */
void bch2_dump_trans_paths_updates ( struct btree_trans * ) ;
/* 2023-09-13 00:16:02 +03:00 */
struct btree_trans * __bch2_trans_get ( struct bch_fs * , unsigned ) ;
void bch2_trans_put ( struct btree_trans * ) ;
/* 2017-03-17 09:18:50 +03:00 */
/* 2022-10-17 14:03:11 +03:00 */
extern const char * bch2_btree_transaction_fns [ BCH_TRANSACTIONS_NR ] ;
unsigned bch2_trans_get_fn_idx ( const char * ) ;
/* 2023-09-13 00:16:02 +03:00 */
/*
 * Get a btree_trans for the current function: looks up (and caches in a
 * function-local static) an index for __func__, used to identify this
 * transaction in debug output and statistics, then allocates the
 * transaction via __bch2_trans_get().
 */
#define bch2_trans_get(_c)						\
({									\
	static unsigned trans_fn_idx;					\
									\
	if (unlikely(!trans_fn_idx))					\
		trans_fn_idx = bch2_trans_get_fn_idx(__func__);		\
	__bch2_trans_get(_c, trans_fn_idx);				\
})
/* 2022-01-04 08:33:52 +03:00 */
/* 2022-06-18 03:12:02 +03:00 */
void bch2_btree_trans_to_text ( struct printbuf * , struct btree_trans * ) ;
/* 2020-06-02 23:36:11 +03:00 */
/* 2019-09-07 21:16:00 +03:00 */
void bch2_fs_btree_iter_exit ( struct bch_fs * ) ;
/* 2023-12-14 22:06:41 +03:00 */
void bch2_fs_btree_iter_init_early ( struct bch_fs * ) ;
/* 2019-09-07 21:16:00 +03:00 */
int bch2_fs_btree_iter_init ( struct bch_fs * ) ;
/* 2017-03-17 09:18:50 +03:00 */
# endif /* _BCACHEFS_BTREE_ITER_H */