bcachefs: btree_bkey_cached_common->cached
Add a type descriptor to btree_bkey_cached_common - there's no reason not to since we've got padding that was otherwise unused, and this is a nice cleanup (and helpful in later patches).

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit 4e6defd106
parent 6b81f194f3
@@ -3021,8 +3021,7 @@ void bch2_trans_exit(struct btree_trans *trans)
 
 static void __maybe_unused
 bch2_btree_path_node_to_text(struct printbuf *out,
-			     struct btree_bkey_cached_common *b,
-			     bool cached)
+			     struct btree_bkey_cached_common *b)
 {
 	struct six_lock_count c = six_lock_counts(&b->lock);
 	struct task_struct *owner;
@@ -3035,7 +3034,7 @@ bch2_btree_path_node_to_text(struct printbuf *out,
 
 	prt_printf(out, " l=%u %s:",
 		   b->level, bch2_btree_ids[b->btree_id]);
-	bch2_bpos_to_text(out, btree_node_pos(b, cached));
+	bch2_bpos_to_text(out, btree_node_pos(b));
 
 	prt_printf(out, " locks %u:%u:%u held by pid %u",
 		   c.n[0], c.n[1], c.n[2], pid);
@@ -3068,7 +3067,7 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
 		    !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
 			prt_printf(out, " %c l=%u ",
 				   lock_types[btree_node_locked_type(path, l)], l);
-			bch2_btree_path_node_to_text(out, b, path->cached);
+			bch2_btree_path_node_to_text(out, b);
 			prt_printf(out, "\n");
 		}
 	}
@@ -3086,7 +3085,7 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
 		bch2_bpos_to_text(out, trans->locking_pos);
 
 		prt_printf(out, " node ");
-		bch2_btree_path_node_to_text(out, b, path->cached);
+		bch2_btree_path_node_to_text(out, b);
 		prt_printf(out, "\n");
 	}
 }
@@ -204,6 +204,7 @@ bkey_cached_alloc(struct btree_trans *trans,
 	INIT_LIST_HEAD(&ck->list);
 	__six_lock_init(&ck->c.lock, "b->c.lock", &bch2_btree_node_lock_key);
 	lockdep_set_novalidate_class(&ck->c.lock);
+	ck->c.cached = true;
 	BUG_ON(!six_trylock_intent(&ck->c.lock));
 	BUG_ON(!six_trylock_write(&ck->c.lock));
 	return ck;
@@ -144,8 +144,7 @@ int __bch2_btree_node_lock(struct btree_trans *trans,
 
 		/* Must lock btree nodes in key order: */
 		if (btree_node_locked(linked, level) &&
-		    bpos_cmp(pos, btree_node_pos(&linked->l[level].b->c,
-						 linked->cached)) <= 0) {
+		    bpos_cmp(pos, btree_node_pos(&linked->l[level].b->c)) <= 0) {
 			reason = 7;
 			goto deadlock;
 		}
@@ -63,6 +63,7 @@ struct btree_bkey_cached_common {
 	struct six_lock		lock;
 	u8			level;
 	u8			btree_id;
+	bool			cached;
 };
 
 struct btree {
@@ -335,10 +336,9 @@ struct bkey_cached {
 	struct bkey_i		*k;
 };
 
-static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b,
-					  bool cached)
+static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b)
 {
-	return !cached
+	return !b->cached
 		? container_of(b, struct btree, c)->key.k.p
 		: container_of(b, struct bkey_cached, c)->key.pos;
 }
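For context, here is a minimal, self-contained sketch of the pattern this patch relies on: both struct btree and struct bkey_cached embed struct btree_bkey_cached_common, and storing a cached flag in that common header lets btree_node_pos() recover the containing object via container_of() without callers threading a separate bool cached argument through. The types and field names below are simplified stand-ins for illustration, not the actual bcachefs definitions.

#include <stddef.h>
#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *) ((char *) (ptr) - offsetof(type, member)))

struct bpos { unsigned long long inode, offset; };

/* Common header embedded in both node types, now carrying a type flag. */
struct btree_bkey_cached_common {
	unsigned char	level;
	unsigned char	btree_id;
	bool		cached;	/* true for bkey_cached, false for btree */
};

struct btree {
	struct btree_bkey_cached_common	c;
	struct bpos			pos;	/* stands in for key.k.p */
};

struct bkey_cached {
	struct btree_bkey_cached_common	c;
	struct bpos			pos;	/* stands in for key.pos */
};

/*
 * With the flag stored in the common header, the helper no longer needs a
 * "bool cached" parameter - it picks the containing type from b->cached.
 */
static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b)
{
	return !b->cached
		? container_of(b, struct btree, c)->pos
		: container_of(b, struct bkey_cached, c)->pos;
}

int main(void)
{
	struct btree n        = { .c = { .cached = false }, .pos = { 1, 100 } };
	struct bkey_cached ck = { .c = { .cached = true },  .pos = { 2, 200 } };

	struct bpos p1 = btree_node_pos(&n.c);
	struct bpos p2 = btree_node_pos(&ck.c);

	printf("btree node pos: %llu:%llu\n", p1.inode, p1.offset);
	printf("cached key pos: %llu:%llu\n", p2.inode, p2.offset);
	return 0;
}

This mirrors why the diff can drop the extra parameter from bch2_btree_path_node_to_text() and __bch2_btree_node_lock(): once bkey_cached_alloc() sets ck->c.cached = true (and btree nodes leave it false), the information travels with the object itself.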