8cad3e2f73
This switches inode updates to use cached btree iterators, which should be
a nice performance boost, since lock contention on the inodes btree can be
a bottleneck on multithreaded workloads.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
#ifndef _BCACHEFS_BTREE_KEY_CACHE_H
#define _BCACHEFS_BTREE_KEY_CACHE_H

/* Lookup, traversal, insert and writeback interface for the key cache: */

struct bkey_cached *
bch2_btree_key_cache_find(struct bch_fs *, enum btree_id, struct bpos);

int bch2_btree_iter_traverse_cached(struct btree_iter *);

bool bch2_btree_insert_key_cached(struct btree_trans *,
			struct btree_iter *, struct bkey_i *);
int bch2_btree_key_cache_flush(struct btree_trans *,
			       enum btree_id, struct bpos);

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_key_cache_verify_clean(struct btree_trans *,
				enum btree_id, struct bpos);
#else
/* No-op stub when debug checks are compiled out: */
static inline void
bch2_btree_key_cache_verify_clean(struct btree_trans *trans,
				enum btree_id id, struct bpos pos) {}
#endif

/* Per-filesystem lifetime hooks for the key cache: */

void bch2_fs_btree_key_cache_exit(struct btree_key_cache *);
void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *);
int bch2_fs_btree_key_cache_init(struct btree_key_cache *);

void bch2_btree_key_cache_to_text(struct printbuf *, struct btree_key_cache *);

#endif /* _BCACHEFS_BTREE_KEY_CACHE_H */
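For illustration, a minimal sketch of how a caller might drive this interface
to flush a cached inode key back to the inodes btree. The helper
flush_cached_inode() is hypothetical, and the transaction setup
(bch2_trans_init(), bch2_trans_exit(), POS(), BTREE_ID_INODES) is assumed
from the bcachefs tree of this era, not taken from this commit:

/*
 * Hedged sketch, not code from this commit: write back any dirty
 * cached key for an inode.  flush_cached_inode() is a hypothetical
 * helper; bch2_trans_init()/bch2_trans_exit(), POS() and
 * BTREE_ID_INODES are assumed from the bcachefs tree of this era.
 */
static int flush_cached_inode(struct bch_fs *c, u64 inum)
{
	struct btree_trans trans;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	/* Write the cached key at this position back to the btree, if dirty: */
	ret = bch2_btree_key_cache_flush(&trans, BTREE_ID_INODES,
					 POS(0, inum));

	bch2_trans_exit(&trans);
	return ret;
}

Writing dirty cached keys back like this is presumably what keeps the cache
transparent to code that reads the inodes btree directly, while letting hot
inode updates stay in the cache and off the btree's locks.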