/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "bset.h"
#include "btree_types.h"

static inline void btree_iter_set_dirty(struct btree_iter *iter,
					enum btree_iter_uptodate u)
{
	iter->uptodate = max_t(unsigned, iter->uptodate, u);
}

static inline struct btree *btree_iter_node(struct btree_iter *iter,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? iter->l[level].b : NULL;
}

static inline bool btree_node_lock_seq_matches(const struct btree_iter *iter,
					const struct btree *b, unsigned level)
{
	/*
	 * We don't compare the low bits of the lock sequence numbers because
	 * @iter might have taken a write lock on @b, and we don't want to skip
	 * the linked iterator if the sequence numbers were equal before taking
	 * that write lock. The lock sequence number is incremented by taking
	 * and releasing write locks and is even when unlocked:
	 */
	return iter->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
}

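/*
 * Worked example (illustrative): a node unlocked at seq 6 goes 6 -> 7 when a
 * write lock is taken and 7 -> 8 when it is released. An iterator that
 * recorded lock_seq 6 still matches while it itself holds the write lock
 * (6 >> 1 == 7 >> 1), but no longer matches once a full lock/unlock cycle
 * has happened (6 >> 1 != 8 >> 1).
 */
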
static inline struct btree *btree_node_parent(struct btree_iter *iter,
					      struct btree *b)
{
	return btree_iter_node(iter, b->c.level + 1);
}

static inline bool btree_trans_has_multiple_iters(const struct btree_trans *trans)
{
	return hweight64(trans->iters_linked) > 1;
}

static inline int btree_iter_err(const struct btree_iter *iter)
{
	return iter->flags & BTREE_ITER_ERROR ? -EIO : 0;
}

/* Iterate over iters within a transaction: */

static inline struct btree_iter *
__trans_next_iter(struct btree_trans *trans, unsigned idx)
{
	u64 l;

	if (idx == BTREE_ITER_MAX)
		return NULL;

	l = trans->iters_linked >> idx;
	if (!l)
		return NULL;

	idx += __ffs64(l);
	EBUG_ON(idx >= BTREE_ITER_MAX);
	EBUG_ON(trans->iters[idx].idx != idx);
	return &trans->iters[idx];
}

#define trans_for_each_iter(_trans, _iter)				\
	for (_iter = __trans_next_iter((_trans), 0);			\
	     (_iter);							\
	     _iter = __trans_next_iter((_trans), (_iter)->idx + 1))

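/*
 * Example usage (illustrative sketch, not part of the interface): count how
 * many iterators a transaction currently has linked:
 *
 *	struct btree_iter *iter;
 *	unsigned nr = 0;
 *
 *	trans_for_each_iter(trans, iter)
 *		nr++;
 */
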
static inline bool __iter_has_node(const struct btree_iter *iter,
				   const struct btree *b)
{
	return iter->l[b->c.level].b == b &&
		btree_node_lock_seq_matches(iter, b, b->c.level);
}

static inline struct btree_iter *
__trans_next_iter_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned idx)
{
	struct btree_iter *iter = __trans_next_iter(trans, idx);

	while (iter && !__iter_has_node(iter, b))
		iter = __trans_next_iter(trans, iter->idx + 1);

	return iter;
}

#define trans_for_each_iter_with_node(_trans, _b, _iter)		\
	for (_iter = __trans_next_iter_with_node((_trans), (_b), 0);	\
	     (_iter);							\
	     _iter = __trans_next_iter_with_node((_trans), (_b),	\
						 (_iter)->idx + 1))

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_trans_verify_iters(struct btree_trans *, struct btree *);
void bch2_btree_trans_verify_locks(struct btree_trans *);
#else
static inline void bch2_btree_trans_verify_iters(struct btree_trans *trans,
						 struct btree *b) {}
static inline void bch2_btree_trans_verify_locks(struct btree_trans *trans) {}
#endif

void bch2_btree_iter_fix_key_modified(struct btree_iter *, struct btree *,
				      struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
			      struct btree_node_iter *, struct bkey_packed *,
			      unsigned, unsigned);

bool bch2_btree_iter_relock(struct btree_iter *, bool);
bool bch2_trans_relock(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);

bool __bch2_btree_iter_upgrade(struct btree_iter *, unsigned);
bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *, unsigned);

static inline bool bch2_btree_iter_upgrade(struct btree_iter *iter,
					   unsigned new_locks_want)
{
	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

	return iter->locks_want < new_locks_want
		? (!iter->trans->nounlock
		   ? __bch2_btree_iter_upgrade(iter, new_locks_want)
		   : __bch2_btree_iter_upgrade_nounlock(iter, new_locks_want))
		: iter->uptodate <= BTREE_ITER_NEED_PEEK;
}

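/*
 * Example usage (illustrative sketch): grab intent locks on the leaf before
 * an update, bailing out so the caller can restart if the upgrade fails:
 *
 *	if (!bch2_btree_iter_upgrade(iter, 1))
 *		return -EINTR;
 */
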
void __bch2_btree_iter_downgrade(struct btree_iter *, unsigned);

static inline void bch2_btree_iter_downgrade(struct btree_iter *iter)
{
	if (iter->locks_want > ((iter->flags & BTREE_ITER_INTENT) ? 1 : 0))
		__bch2_btree_iter_downgrade(iter, 0);
}

void bch2_trans_downgrade(struct btree_trans *);

void bch2_btree_iter_node_replace(struct btree_iter *, struct btree *);
void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *);

void bch2_btree_iter_reinit_node(struct btree_iter *, struct btree *);

int __must_check __bch2_btree_iter_traverse(struct btree_iter *);

static inline int __must_check
bch2_btree_iter_traverse(struct btree_iter *iter)
{
	return iter->uptodate >= BTREE_ITER_NEED_RELOCK
		? __bch2_btree_iter_traverse(iter)
		: 0;
}

int bch2_btree_iter_traverse_all(struct btree_trans *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *);

void __bch2_btree_iter_set_pos(struct btree_iter *, struct bpos, bool);
void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos);

/* Sort order for locking btree iterators: */
static inline int btree_iter_lock_cmp(const struct btree_iter *l,
				      const struct btree_iter *r)
{
	return   cmp_int(l->btree_id, r->btree_id) ?:
		-cmp_int(btree_iter_is_cached(l), btree_iter_is_cached(r)) ?:
		 bkey_cmp(l->pos, r->pos);
}

/*
 * Unlocks before scheduling.
 * Note: does not revalidate the iterators; a -EINTR return means the relock
 * failed and the caller must restart.
 */
static inline int bch2_trans_cond_resched(struct btree_trans *trans)
{
	if (need_resched() || race_fault()) {
		bch2_trans_unlock(trans);
		schedule();
		return bch2_trans_relock(trans) ? 0 : -EINTR;
	} else {
		return 0;
	}
}

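/*
 * Example usage (illustrative sketch) from a long-running scan loop:
 *
 *	ret = bch2_trans_cond_resched(trans);
 *	if (ret)
 *		break;
 */
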
#define __for_each_btree_node(_trans, _iter, _btree_id, _start,	\
			      _locks_want, _depth, _flags, _b)		\
	for (_iter = bch2_trans_get_node_iter((_trans), (_btree_id),	\
				_start, _locks_want, _depth, _flags),	\
	     _b = bch2_btree_iter_peek_node(_iter);			\
	     (_b);							\
	     (_b) = bch2_btree_iter_next_node(_iter))

#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b)					\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,	\
			      0, 0, _flags, _b)

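/*
 * Example usage (illustrative sketch): walk every node of the extents btree
 * from POS_MIN, assuming "c" is the filesystem:
 *
 *	struct btree_trans trans;
 *	struct btree_iter *iter;
 *	struct btree *b;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *
 *	for_each_btree_node(&trans, iter, BTREE_ID_EXTENTS, POS_MIN, 0, b)
 *		pr_info("node at level %u\n", b->c.level);
 *
 *	bch2_trans_exit(&trans);
 */
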
static inline struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter,
						     unsigned flags)
{
	if ((flags & BTREE_ITER_TYPE) == BTREE_ITER_CACHED)
		return bch2_btree_iter_peek_cached(iter);
	else
		return flags & BTREE_ITER_SLOTS
			? bch2_btree_iter_peek_slot(iter)
			: bch2_btree_iter_peek(iter);
}

static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
						     unsigned flags)
{
	return flags & BTREE_ITER_SLOTS
		? bch2_btree_iter_next_slot(iter)
		: bch2_btree_iter_next(iter);
}

static inline int bkey_err(struct bkey_s_c k)
{
	return PTR_ERR_OR_ZERO(k.k);
}

#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _ret)			\
	for ((_iter) = bch2_trans_get_iter((_trans), (_btree_id),	\
					   (_start), (_flags)),		\
	     (_ret) = PTR_ERR_OR_ZERO(((_k) =				\
			__bch2_btree_iter_peek(_iter, _flags)).k);	\
	     !_ret && (_k).k;						\
	     (_ret) = PTR_ERR_OR_ZERO(((_k) =				\
			__bch2_btree_iter_next(_iter, _flags)).k))

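/*
 * Example usage (illustrative sketch): scan all keys in the xattrs btree,
 * stopping at the first error; assumes "trans" was set up with
 * bch2_trans_init():
 *
 *	struct btree_iter *iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN, 0, k, ret)
 *		pr_debug("key at %llu:%llu\n", k.k->p.inode, k.k->p.offset);
 *
 *	bch2_trans_iter_put(&trans, iter);
 */
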
#define for_each_btree_key_continue(_iter, _flags, _k, _ret)		\
	for ((_k) = __bch2_btree_iter_peek(_iter, _flags);		\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     (_k) = __bch2_btree_iter_next(_iter, _flags))

/* new multiple iterator interface: */

int bch2_trans_iter_put(struct btree_trans *, struct btree_iter *);
int bch2_trans_iter_free(struct btree_trans *, struct btree_iter *);

void bch2_trans_unlink_iters(struct btree_trans *);

struct btree_iter *__bch2_trans_get_iter(struct btree_trans *, enum btree_id,
					 struct bpos, unsigned);

static inline struct btree_iter *
bch2_trans_get_iter(struct btree_trans *trans, enum btree_id btree_id,
		    struct bpos pos, unsigned flags)
{
	struct btree_iter *iter =
		__bch2_trans_get_iter(trans, btree_id, pos, flags);

	iter->ip_allocated = _THIS_IP_;
	return iter;
}

struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *,
					  struct btree_iter *);

static inline struct btree_iter *
bch2_trans_copy_iter(struct btree_trans *trans, struct btree_iter *src)
{
	struct btree_iter *iter =
		__bch2_trans_copy_iter(trans, src);

	iter->ip_allocated = _THIS_IP_;
	return iter;
}

struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *,
					    enum btree_id, struct bpos,
					    unsigned, unsigned, unsigned);

#define TRANS_RESET_NOTRAVERSE		(1 << 0)

void bch2_trans_reset(struct btree_trans *, unsigned);

static inline void bch2_trans_begin(struct btree_trans *trans)
{
	bch2_trans_reset(trans, 0);
}

void *bch2_trans_kmalloc(struct btree_trans *, size_t);

void bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned, size_t);
int bch2_trans_exit(struct btree_trans *);

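/*
 * Typical lifecycle (illustrative sketch): initialize a transaction, retry
 * the operation on -EINTR, then tear it down; "do_op()" is a hypothetical
 * helper:
 *
 *	struct btree_trans trans;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	do {
 *		bch2_trans_begin(&trans);
 *		ret = do_op(&trans);
 *	} while (ret == -EINTR);
 *	ret = bch2_trans_exit(&trans) ?: ret;
 */
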
void bch2_btree_trans_to_text(struct printbuf *, struct bch_fs *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */