2017-03-16 22:18:50 -08:00
/* SPDX-License-Identifier: GPL-2.0 */
# ifndef _BCACHEFS_BTREE_ITER_H
# define _BCACHEFS_BTREE_ITER_H
2019-01-13 16:02:22 -05:00
# include "bset.h"
2017-03-16 22:18:50 -08:00
# include "btree_types.h"
static inline void btree_iter_set_dirty ( struct btree_iter * iter ,
enum btree_iter_uptodate u )
{
iter - > uptodate = max_t ( unsigned , iter - > uptodate , u ) ;
}
static inline struct btree * btree_iter_node ( struct btree_iter * iter ,
unsigned level )
{
return level < BTREE_MAX_DEPTH ? iter - > l [ level ] . b : NULL ;
}
2020-06-06 12:28:01 -04:00
/*
 * Check whether @iter's recorded lock sequence number for @b at @level still
 * matches the node's current lock sequence number — i.e. nobody has taken and
 * released a write lock on @b since @iter last had it locked.
 */
static inline bool btree_node_lock_seq_matches(const struct btree_iter *iter,
					       const struct btree *b, unsigned level)
{
	/*
	 * We don't compare the low bits of the lock sequence numbers because
	 * @iter might have taken a write lock on @b, and we don't want to skip
	 * the linked iterator if the sequence numbers were equal before taking
	 * that write lock. The lock sequence number is incremented by taking
	 * and releasing write locks and is even when unlocked:
	 */
	return iter->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
}
2017-03-16 22:18:50 -08:00
static inline struct btree * btree_node_parent ( struct btree_iter * iter ,
struct btree * b )
{
2020-06-06 12:28:01 -04:00
return btree_iter_node ( iter , b - > c . level + 1 ) ;
2017-03-16 22:18:50 -08:00
}
2019-03-27 22:03:30 -04:00
static inline bool btree_trans_has_multiple_iters ( const struct btree_trans * trans )
{
return hweight64 ( trans - > iters_linked ) > 1 ;
}
static inline int btree_iter_err ( const struct btree_iter * iter )
{
return iter - > flags & BTREE_ITER_ERROR ? - EIO : 0 ;
}
/* Iterate over iters within a transaction: */
2019-10-19 19:03:23 -04:00
# define trans_for_each_iter_all(_trans, _iter) \
for ( _iter = ( _trans ) - > iters ; \
_iter < ( _trans ) - > iters + ( _trans ) - > nr_iters ; \
_iter + + )
2019-03-27 22:03:30 -04:00
static inline struct btree_iter *
2019-03-27 23:14:38 -04:00
__trans_next_iter ( struct btree_trans * trans , unsigned idx )
2019-03-27 22:03:30 -04:00
{
2019-03-27 23:14:38 -04:00
EBUG_ON ( idx < trans - > nr_iters & & trans - > iters [ idx ] . idx ! = idx ) ;
2019-03-27 22:03:30 -04:00
2019-03-27 23:14:38 -04:00
for ( ; idx < trans - > nr_iters ; idx + + )
2019-03-27 22:03:30 -04:00
if ( trans - > iters_linked & ( 1ULL < < idx ) )
return & trans - > iters [ idx ] ;
return NULL ;
}
/* Iterate over the linked (in-use) iterators of @_trans, in index order. */
#define trans_for_each_iter(_trans, _iter)				\
	for (_iter = __trans_next_iter((_trans), 0);			\
	     (_iter);							\
	     _iter = __trans_next_iter((_trans), (_iter)->idx + 1))
2019-03-27 22:03:30 -04:00
2017-03-16 22:18:50 -08:00
static inline bool __iter_has_node ( const struct btree_iter * iter ,
const struct btree * b )
{
2020-06-06 12:28:01 -04:00
return iter - > l [ b - > c . level ] . b = = b & &
btree_node_lock_seq_matches ( iter , b , b - > c . level ) ;
2017-03-16 22:18:50 -08:00
}
static inline struct btree_iter *
2019-03-27 22:03:30 -04:00
__trans_next_iter_with_node ( struct btree_trans * trans , struct btree * b ,
2019-03-27 23:14:38 -04:00
unsigned idx )
2017-03-16 22:18:50 -08:00
{
2019-05-14 14:08:23 -04:00
struct btree_iter * iter = __trans_next_iter ( trans , idx ) ;
2019-03-27 22:03:30 -04:00
2019-05-14 14:08:23 -04:00
while ( iter & & ! __iter_has_node ( iter , b ) )
iter = __trans_next_iter ( trans , iter - > idx + 1 ) ;
2019-03-27 22:03:30 -04:00
2019-05-14 14:08:23 -04:00
return iter ;
2017-03-16 22:18:50 -08:00
}
2019-03-27 22:03:30 -04:00
/* Iterate over the linked iterators of @_trans that point at node @_b. */
#define trans_for_each_iter_with_node(_trans, _b, _iter)		\
	for (_iter = __trans_next_iter_with_node((_trans), (_b), 0);	\
	     (_iter);							\
	     _iter = __trans_next_iter_with_node((_trans), (_b),	\
						 (_iter)->idx + 1))
2017-03-16 22:18:50 -08:00
/* Debug-only consistency checks; compiled to no-ops when CONFIG_BCACHEFS_DEBUG=n */
#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_trans_verify_iters(struct btree_trans *, struct btree *);
void bch2_btree_trans_verify_locks(struct btree_trans *);
#else
static inline void bch2_btree_trans_verify_iters(struct btree_trans *trans,
						 struct btree *b) {}
/* parameter renamed from the misleading 'iter': it is a btree_trans */
static inline void bch2_btree_trans_verify_locks(struct btree_trans *trans) {}
#endif
2019-10-02 09:56:39 -04:00
void bch2_btree_iter_fix_key_modified ( struct btree_iter * , struct btree * ,
struct bkey_packed * ) ;
2017-03-16 22:18:50 -08:00
void bch2_btree_node_iter_fix ( struct btree_iter * , struct btree * ,
2018-08-11 19:12:05 -04:00
struct btree_node_iter * , struct bkey_packed * ,
unsigned , unsigned ) ;
2017-03-16 22:18:50 -08:00
2019-03-07 19:46:10 -05:00
bool bch2_btree_iter_relock ( struct btree_iter * , bool ) ;
2019-05-15 09:47:40 -04:00
bool bch2_trans_relock ( struct btree_trans * ) ;
void bch2_trans_unlock ( struct btree_trans * ) ;
2017-03-16 22:18:50 -08:00
bool __bch2_btree_iter_upgrade ( struct btree_iter * , unsigned ) ;
bool __bch2_btree_iter_upgrade_nounlock ( struct btree_iter * , unsigned ) ;
static inline bool bch2_btree_iter_upgrade ( struct btree_iter * iter ,
2019-05-10 17:09:42 -04:00
unsigned new_locks_want )
2017-03-16 22:18:50 -08:00
{
new_locks_want = min ( new_locks_want , BTREE_MAX_DEPTH ) ;
return iter - > locks_want < new_locks_want
2019-05-10 17:15:30 -04:00
? ( ! iter - > trans - > nounlock
2017-03-16 22:18:50 -08:00
? __bch2_btree_iter_upgrade ( iter , new_locks_want )
: __bch2_btree_iter_upgrade_nounlock ( iter , new_locks_want ) )
: iter - > uptodate < = BTREE_ITER_NEED_PEEK ;
}
void __bch2_btree_iter_downgrade ( struct btree_iter * , unsigned ) ;
static inline void bch2_btree_iter_downgrade ( struct btree_iter * iter )
{
if ( iter - > locks_want > ( iter - > flags & BTREE_ITER_INTENT ) ? 1 : 0 )
__bch2_btree_iter_downgrade ( iter , 0 ) ;
}
2020-06-08 13:26:48 -04:00
void bch2_trans_downgrade ( struct btree_trans * ) ;
2017-03-16 22:18:50 -08:00
void bch2_btree_iter_node_replace ( struct btree_iter * , struct btree * ) ;
void bch2_btree_iter_node_drop ( struct btree_iter * , struct btree * ) ;
void bch2_btree_iter_reinit_node ( struct btree_iter * , struct btree * ) ;
2019-09-08 14:00:12 -04:00
int __must_check __bch2_btree_iter_traverse ( struct btree_iter * ) ;
/*
 * Revalidate @iter if necessary, returning 0 on success. Cheap when the
 * iterator is already valid (uptodate < BTREE_ITER_NEED_RELOCK).
 */
static inline int __must_check
bch2_btree_iter_traverse(struct btree_iter *iter)
{
	if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
		return 0;

	return __bch2_btree_iter_traverse(iter);
}
2019-03-28 00:07:24 -04:00
int bch2_btree_iter_traverse_all ( struct btree_trans * ) ;
2017-03-16 22:18:50 -08:00
struct btree * bch2_btree_iter_peek_node ( struct btree_iter * ) ;
2020-02-18 16:17:55 -05:00
struct btree * bch2_btree_iter_next_node ( struct btree_iter * ) ;
2017-03-16 22:18:50 -08:00
struct bkey_s_c bch2_btree_iter_peek ( struct btree_iter * ) ;
struct bkey_s_c bch2_btree_iter_next ( struct btree_iter * ) ;
2019-09-07 17:17:21 -04:00
2020-03-05 18:44:59 -05:00
struct bkey_s_c bch2_btree_iter_peek_with_updates ( struct btree_iter * ) ;
struct bkey_s_c bch2_btree_iter_next_with_updates ( struct btree_iter * ) ;
2019-09-07 17:17:21 -04:00
struct bkey_s_c bch2_btree_iter_peek_prev ( struct btree_iter * ) ;
2017-03-16 22:18:50 -08:00
struct bkey_s_c bch2_btree_iter_prev ( struct btree_iter * ) ;
struct bkey_s_c bch2_btree_iter_peek_slot ( struct btree_iter * ) ;
struct bkey_s_c bch2_btree_iter_next_slot ( struct btree_iter * ) ;
2019-03-07 19:46:10 -05:00
struct bkey_s_c bch2_btree_iter_peek_cached ( struct btree_iter * ) ;
2017-03-16 22:18:50 -08:00
void bch2_btree_iter_set_pos_same_leaf ( struct btree_iter * , struct bpos ) ;
2020-01-31 13:23:18 -05:00
void __bch2_btree_iter_set_pos ( struct btree_iter * , struct bpos , bool ) ;
2017-03-16 22:18:50 -08:00
void bch2_btree_iter_set_pos ( struct btree_iter * , struct bpos ) ;
2020-10-26 14:45:20 -04:00
/* Sort order for locking btree iterators: */
static inline int btree_iter_lock_cmp ( const struct btree_iter * l ,
const struct btree_iter * r )
2017-03-16 22:18:50 -08:00
{
2019-03-07 19:46:10 -05:00
return cmp_int ( l - > btree_id , r - > btree_id ) ? :
2020-11-05 20:49:08 -05:00
- cmp_int ( btree_iter_is_cached ( l ) , btree_iter_is_cached ( r ) ) ? :
2019-03-07 19:46:10 -05:00
bkey_cmp ( l - > pos , r - > pos ) ;
2017-03-16 22:18:50 -08:00
}
/*
* Unlocks before scheduling
* Note : does not revalidate iterator
*/
2019-05-15 09:53:27 -04:00
static inline int bch2_trans_cond_resched ( struct btree_trans * trans )
2017-03-16 22:18:50 -08:00
{
2019-05-15 09:53:27 -04:00
if ( need_resched ( ) | | race_fault ( ) ) {
2019-03-25 17:06:42 -04:00
bch2_trans_unlock ( trans ) ;
2017-03-16 22:18:50 -08:00
schedule ( ) ;
2019-05-15 09:53:27 -04:00
return bch2_trans_relock ( trans ) ? 0 : - EINTR ;
} else {
return 0 ;
2017-03-16 22:18:50 -08:00
}
}
2019-03-25 15:10:15 -04:00
/*
 * Walk the nodes of a btree. Fixed a macro-hygiene bug: the initializer
 * assigned to the literal identifier 'iter' instead of the _iter parameter,
 * which only compiled when the caller's variable happened to be named 'iter'
 * and silently clobbered it.
 */
#define __for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      _locks_want, _depth, _flags, _b)		\
	for (_iter = bch2_trans_get_node_iter((_trans), (_btree_id),	\
				_start, _locks_want, _depth, _flags),	\
	     _b = bch2_btree_iter_peek_node(_iter);			\
	     (_b);							\
	     (_b) = bch2_btree_iter_next_node(_iter))

#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b)					\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      0, 0, _flags, _b)
2017-03-16 22:18:50 -08:00
static inline struct bkey_s_c __bch2_btree_iter_peek ( struct btree_iter * iter ,
unsigned flags )
{
2019-03-07 19:46:10 -05:00
if ( ( flags & BTREE_ITER_TYPE ) = = BTREE_ITER_CACHED )
return bch2_btree_iter_peek_cached ( iter ) ;
else
return flags & BTREE_ITER_SLOTS
? bch2_btree_iter_peek_slot ( iter )
: bch2_btree_iter_peek ( iter ) ;
2017-03-16 22:18:50 -08:00
}
/* Dispatch next to the slot or key variant depending on @flags. */
static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
						     unsigned flags)
{
	if (flags & BTREE_ITER_SLOTS)
		return bch2_btree_iter_next_slot(iter);

	return bch2_btree_iter_next(iter);
}
2019-09-25 15:57:56 -04:00
/* Extract the error code encoded in @k's key pointer, or 0 if @k is valid. */
static inline int bkey_err(struct bkey_s_c k)
{
	return PTR_ERR_OR_ZERO(k.k);
}
2019-04-17 15:49:28 -04:00
/*
 * Iterate over the keys of a btree starting at @_start; @_ret receives any
 * error from iterator allocation or traversal. Uses bkey_err() for the key
 * error checks, consistent with for_each_btree_key_continue().
 */
#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _ret)			\
	for ((_ret) = PTR_ERR_OR_ZERO((_iter) =				\
			bch2_trans_get_iter((_trans), (_btree_id),	\
					    (_start), (_flags))) ?:	\
		      bkey_err((_k) =					\
			__bch2_btree_iter_peek(_iter, _flags));		\
	     !(_ret) && (_k).k;						\
	     (_ret) = bkey_err((_k) =					\
			__bch2_btree_iter_next(_iter, _flags)))
2017-03-16 22:18:50 -08:00
2019-09-25 15:57:56 -04:00
/* Continue iterating from @_iter's current position (iterator already set up). */
#define for_each_btree_key_continue(_iter, _flags, _k, _ret)		\
	for ((_k) = __bch2_btree_iter_peek(_iter, _flags);		\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     (_k) = __bch2_btree_iter_next(_iter, _flags))
/* new multiple iterator interface: */
2019-03-25 15:10:15 -04:00
int bch2_trans_iter_put ( struct btree_trans * , struct btree_iter * ) ;
int bch2_trans_iter_free ( struct btree_trans * , struct btree_iter * ) ;
2019-03-25 15:34:48 -04:00
2019-09-26 22:21:39 -04:00
void bch2_trans_unlink_iters ( struct btree_trans * ) ;
2017-03-16 22:18:50 -08:00
2020-04-01 17:14:14 -04:00
struct btree_iter * __bch2_trans_get_iter ( struct btree_trans * , enum btree_id ,
struct bpos , unsigned ) ;
static inline struct btree_iter *
bch2_trans_get_iter ( struct btree_trans * trans , enum btree_id btree_id ,
struct bpos pos , unsigned flags )
{
struct btree_iter * iter =
__bch2_trans_get_iter ( trans , btree_id , pos , flags ) ;
if ( ! IS_ERR ( iter ) )
iter - > ip_allocated = _THIS_IP_ ;
return iter ;
}
struct btree_iter * __bch2_trans_copy_iter ( struct btree_trans * ,
2019-03-25 22:43:26 -04:00
struct btree_iter * ) ;
2020-04-01 17:14:14 -04:00
static inline struct btree_iter *
bch2_trans_copy_iter ( struct btree_trans * trans , struct btree_iter * src )
{
struct btree_iter * iter =
__bch2_trans_copy_iter ( trans , src ) ;
if ( ! IS_ERR ( iter ) )
iter - > ip_allocated = _THIS_IP_ ;
return iter ;
}
2019-09-26 22:21:39 -04:00
struct btree_iter * bch2_trans_get_node_iter ( struct btree_trans * ,
enum btree_id , struct bpos ,
unsigned , unsigned , unsigned ) ;
2017-03-16 22:18:50 -08:00
2020-02-26 15:39:46 -05:00
# define TRANS_RESET_NOTRAVERSE (1 << 0)
2017-03-16 22:18:50 -08:00
2019-09-26 22:21:39 -04:00
void bch2_trans_reset ( struct btree_trans * , unsigned ) ;
2017-03-16 22:18:50 -08:00
2019-09-26 22:21:39 -04:00
/*
 * Begin (reset) a transaction for reuse.
 *
 * Dropped the 'return' of a void expression: C99/C11 6.8.6.4 forbids a
 * return-with-expression in a void function (it only compiled as a GNU
 * extension).
 */
static inline void bch2_trans_begin(struct btree_trans *trans)
{
	bch2_trans_reset(trans, 0);
}
void * bch2_trans_kmalloc ( struct btree_trans * , size_t ) ;
2019-05-15 10:54:43 -04:00
void bch2_trans_init ( struct btree_trans * , struct bch_fs * , unsigned , size_t ) ;
2017-03-16 22:18:50 -08:00
int bch2_trans_exit ( struct btree_trans * ) ;
2020-06-02 16:36:11 -04:00
void bch2_btree_trans_to_text ( struct printbuf * , struct bch_fs * ) ;
2019-09-07 14:16:00 -04:00
void bch2_fs_btree_iter_exit ( struct bch_fs * ) ;
int bch2_fs_btree_iter_init ( struct bch_fs * ) ;
2017-03-16 22:18:50 -08:00
# endif /* _BCACHEFS_BTREE_ITER_H */