/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_CACHE_H
#define _BCACHEFS_BTREE_CACHE_H

#include "bcachefs.h"
#include "btree_types.h"
# include "bkey_methods.h"
extern const char * const bch2_btree_node_flags[];

struct btree_iter;

void bch2_recalc_btree_reserve(struct bch_fs *);

void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);
int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *);
int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
				unsigned, enum btree_id);

void bch2_btree_cache_cannibalize_unlock(struct btree_trans *);
int bch2_btree_cache_cannibalize_lock(struct btree_trans *, struct closure *);

struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
struct btree *bch2_btree_node_mem_alloc(struct btree_trans *, bool);

struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_path *,
				  const struct bkey_i *, unsigned,
				  enum six_lock_type, unsigned long);

struct btree *bch2_btree_node_get_noiter(struct btree_trans *, const struct bkey_i *,
					 enum btree_id, unsigned, bool);

int bch2_btree_node_prefetch(struct btree_trans *, struct btree_path *,
			     const struct bkey_i *, enum btree_id, unsigned);

void bch2_btree_node_evict(struct btree_trans *, const struct bkey_i *);

void bch2_fs_btree_cache_exit(struct bch_fs *);
int bch2_fs_btree_cache_init(struct bch_fs *);
void bch2_fs_btree_cache_init_early(struct btree_cache *);

static inline u64 btree_ptr_hash_val(const struct bkey_i *k)
{
	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
		return *((u64 *) bkey_i_to_btree_ptr_c(k)->v.start);
	case KEY_TYPE_btree_ptr_v2:
		/*
		 * The cast/deref is only necessary to avoid sparse endianness
		 * warnings:
		 */
		return *((u64 *) &bkey_i_to_btree_ptr_v2_c(k)->v.seq);
	default:
		return 0;
	}
}

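/*
 * Illustrative sketch (not part of this header): the value computed above
 * is the key btree nodes are hashed on in the btree cache's rhashtable,
 * so a raw lookup would conceptually be:
 *
 *	u64 v = btree_ptr_hash_val(k);
 *	struct btree *b = rhashtable_lookup_fast(&c->btree_cache.table, &v,
 *						 bch_btree_cache_params);
 *
 * bch_btree_cache_params is assumed to be the rhashtable_params defined in
 * btree_cache.c; real callers should use bch2_btree_node_get() and friends.
 */
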
static inline struct btree *btree_node_mem_ptr(const struct bkey_i *k)
{
	return k->k.type == KEY_TYPE_btree_ptr_v2
		? (void *)(unsigned long) bkey_i_to_btree_ptr_v2_c(k)->v.mem_ptr
		: NULL;
}

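/*
 * Hedged usage sketch: the mem_ptr field of a btree_ptr_v2 key is assumed
 * to cache the address of the corresponding in-memory node, so a caller
 * can try it before falling back to a full (hashed, locked) lookup:
 *
 *	struct btree *b = btree_node_mem_ptr(k);
 *	if (!b)
 *		b = bch2_btree_node_get(trans, path, k, level,
 *					SIX_LOCK_read, trace_ip);
 *
 * (level, trace_ip and the lock type here are illustrative.)
 */
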
/* is btree node in hash table? */
static inline bool btree_node_hashed(struct btree *b)
{
	return b->hash_val != 0;
}

#define for_each_cached_btree(_b, _c, _tbl, _iter, _pos)		\
	for ((_tbl) = rht_dereference_rcu((_c)->btree_cache.table.tbl,	\
					  &(_c)->btree_cache.table),	\
	     _iter = 0; _iter < (_tbl)->size; _iter++)			\
		rht_for_each_entry_rcu((_b), (_pos), _tbl, _iter, hash)

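/*
 * Usage sketch (illustrative): the table is an rhashtable, so iteration
 * must happen inside an RCU read-side critical section:
 *
 *	struct bucket_table *tbl;
 *	struct rhash_head *pos;
 *	struct btree *b;
 *	unsigned i;
 *
 *	rcu_read_lock();
 *	for_each_cached_btree(b, c, tbl, i, pos)
 *		pr_info("cached: btree %u level %u\n",
 *			b->c.btree_id, b->c.level);
 *	rcu_read_unlock();
 */
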
static inline size_t btree_bytes(struct bch_fs *c)
{
	return c->opts.btree_node_size;
}

static inline size_t btree_max_u64s(struct bch_fs *c)
{
	return (btree_bytes(c) - sizeof(struct btree_node)) / sizeof(u64);
}

static inline size_t btree_pages(struct bch_fs *c)
{
	return btree_bytes(c) / PAGE_SIZE;
}

static inline unsigned btree_blocks(struct bch_fs *c)
{
	return btree_sectors(c) >> c->block_bits;
}

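/*
 * Worked example (hedged; btree_sectors() and c->block_bits are defined
 * elsewhere): the helpers above express one btree node size in different
 * units. With 256k nodes, 4k pages and 4k blocks (block_bits == 3, since
 * a 4k block is 8 sectors):
 *
 *	btree_bytes(c)  == 262144
 *	btree_pages(c)  == 64
 *	btree_blocks(c) == 512 >> 3 == 64
 */
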
#define BTREE_SPLIT_THRESHOLD(c)		(btree_max_u64s(c) * 2 / 3)

#define BTREE_FOREGROUND_MERGE_THRESHOLD(c)	(btree_max_u64s(c) * 1 / 3)
#define BTREE_FOREGROUND_MERGE_HYSTERESIS(c)			\
	(BTREE_FOREGROUND_MERGE_THRESHOLD(c) +			\
	 (BTREE_FOREGROUND_MERGE_THRESHOLD(c) >> 2))

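/*
 * Reading the thresholds above: a node is split once it is more than 2/3
 * full and becomes a merge candidate below 1/3 full; the hysteresis value
 * sits 25% above the merge threshold (threshold + threshold/4), presumably
 * so nodes don't oscillate between merging and splitting.
 */
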
static inline unsigned btree_id_nr_alive(struct bch_fs *c)
{
	return BTREE_ID_NR + c->btree_roots_extra.nr;
}

static inline struct btree_root *bch2_btree_id_root(struct bch_fs *c, unsigned id)
{
	if (likely(id < BTREE_ID_NR)) {
		return &c->btree_roots_known[id];
	} else {
		unsigned idx = id - BTREE_ID_NR;

		EBUG_ON(idx >= c->btree_roots_extra.nr);
		return &c->btree_roots_extra.data[idx];
	}
}

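/*
 * Usage sketch (illustrative): together with btree_id_nr_alive() above,
 * this supports walking every live btree root, including any extra roots
 * beyond the compiled-in BTREE_ID_NR:
 *
 *	for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
 *		struct btree_root *r = bch2_btree_id_root(c, i);
 *
 *		if (r->b)
 *			...
 *	}
 */
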
static inline struct btree *btree_node_root(struct bch_fs *c, struct btree *b)
{
	return bch2_btree_id_root(c, b->c.btree_id)->b;
}

const char *bch2_btree_id_str(enum btree_id);
void bch2_btree_pos_to_text(struct printbuf *, struct bch_fs *, const struct btree *);
void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *, const struct btree *);
void bch2_btree_cache_to_text(struct printbuf *, const struct bch_fs *);

#endif /* _BCACHEFS_BTREE_CACHE_H */