bcachefs: Convert more locking code to btree_bkey_cached_common
Ideally, all the code in btree_locking.c should be converted, but then we'd want to convert btree_path to point to btree_bkey_cached_common too, and then we'd be in for a much bigger cleanup - but a bit of incremental cleanup will still be helpful for the next patches.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 4e6defd106
commit da4474f209
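For context: struct btree_bkey_cached_common is the small header shared by btree nodes and key cache entries. Both object types embed it as a member named c, so a caller holding either kind of object can pass &obj->c and the locking code only needs to understand the common part. A simplified sketch of the layout, with most fields trimmed (see btree_types.h in the bcachefs tree for the real definitions):

struct btree_bkey_cached_common {
	struct six_lock		lock;
	u8			level;
	u8			btree_id;
};

struct btree {
	struct btree_bkey_cached_common c;	/* embedded common header */
	/* ... btree node fields ... */
};

struct bkey_cached {
	struct btree_bkey_cached_common c;	/* embedded common header */
	/* ... key cache entry fields ... */
};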
@@ -343,7 +343,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 		}
 	}
 
-	ret = bch2_btree_node_lock_write(trans, ck_path, ck_path->l[0].b);
+	ret = bch2_btree_node_lock_write(trans, ck_path, &ck_path->l[0].b->c);
 	if (ret) {
 		kfree(new_k);
 		goto err;
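Call sites like the one above, which hold a struct btree (here via ck_path->l[0].b), now pass the embedded common header rather than the node itself. Nothing is lost in doing so: code that really needs the containing object can get back to it, roughly like this (illustrative helper, not part of this patch):

static inline struct btree *common_to_btree(struct btree_bkey_cached_common *b)
{
	/* Recover the containing btree node from its embedded header "c". */
	return container_of(b, struct btree, c);
}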
@@ -52,9 +52,10 @@ void bch2_btree_node_unlock_write(struct btree_trans *trans,
 
 /* lock */
 
-void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
+void __bch2_btree_node_lock_write(struct btree_trans *trans,
+				  struct btree_bkey_cached_common *b)
 {
-	int readers = bch2_btree_node_lock_counts(trans, NULL, &b->c, b->c.level).n[SIX_LOCK_read];
+	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
 
 	/*
 	 * Must drop our read locks before calling six_lock_write() -
@@ -62,9 +63,9 @@ void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
 	 * goes to 0, and it's safe because we have the node intent
 	 * locked:
 	 */
-	six_lock_readers_add(&b->c.lock, -readers);
-	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
-	six_lock_readers_add(&b->c.lock, readers);
+	six_lock_readers_add(&b->lock, -readers);
+	btree_node_lock_nopath_nofail(trans, b, SIX_LOCK_write);
+	six_lock_readers_add(&b->lock, readers);
 }
 
 static inline bool path_has_read_locks(struct btree_path *path)
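The comment in this hunk is the key to why this is safe: six_lock_write() cannot succeed until the reader count reaches zero, and some of those read holds belong to this transaction itself. Because we already hold the node intent locked, nothing else can take the node away, so it is safe to subtract our own readers, block for the write lock, and add them back afterwards. A toy model of the same idea using a bare atomic counter instead of a six lock (illustration only, not the bcachefs API):

#include <stdatomic.h>
#include <stdbool.h>

/* Toy reader/writer lock: a writer must wait for readers to drain. */
struct toy_lock {
	atomic_int  readers;
	atomic_bool write_locked;
};

/*
 * Take the write side while this thread already holds "my_readers" read
 * references of its own.  Temporarily give them back so the reader count
 * can reach zero, acquire the write side, then restore them.  This is only
 * safe because something else (the intent lock, in bcachefs) guarantees
 * no new readers or writers can slip in while we do this.
 */
static void write_lock_dropping_own_readers(struct toy_lock *l, int my_readers)
{
	atomic_fetch_sub(&l->readers, my_readers);

	bool expected = false;
	while (!atomic_compare_exchange_weak(&l->write_locked, &expected, true))
		expected = false;	/* spin until we own the write bit */

	while (atomic_load(&l->readers))
		;			/* wait for the remaining readers to drain */

	atomic_fetch_add(&l->readers, my_readers);
}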
@@ -279,31 +279,31 @@ static inline int btree_node_lock(struct btree_trans *trans,
 	return ret;
 }
 
-void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);
+void __bch2_btree_node_lock_write(struct btree_trans *, struct btree_bkey_cached_common *);
 
 static inline void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
 						      struct btree_path *path,
-						      struct btree *b)
+						      struct btree_bkey_cached_common *b)
 {
-	EBUG_ON(path->l[b->c.level].b != b);
-	EBUG_ON(path->l[b->c.level].lock_seq != b->c.lock.state.seq);
-	EBUG_ON(!btree_node_intent_locked(path, b->c.level));
+	EBUG_ON(&path->l[b->level].b->c != b);
+	EBUG_ON(path->l[b->level].lock_seq != b->lock.state.seq);
+	EBUG_ON(!btree_node_intent_locked(path, b->level));
 
 	/*
 	 * six locks are unfair, and read locks block while a thread wants a
 	 * write lock: thus, we need to tell the cycle detector we have a write
 	 * lock _before_ taking the lock:
 	 */
-	mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_write);
+	mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_write);
 
-	if (unlikely(!six_trylock_write(&b->c.lock)))
+	if (unlikely(!six_trylock_write(&b->lock)))
 		__bch2_btree_node_lock_write(trans, b);
 }
 
 static inline int __must_check
 bch2_btree_node_lock_write(struct btree_trans *trans,
 			   struct btree_path *path,
-			   struct btree *b)
+			   struct btree_bkey_cached_common *b)
 {
 	bch2_btree_node_lock_write_nofail(trans, path, b);
 	return 0;
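The comment in the hunk above is about ordering: the path's lock bookkeeping is updated before we can possibly block, so that bcachefs's deadlock cycle detector can already see us as a writer on this node. Annotated, the fast path / slow path pair from the hunk reads (comments added here for illustration):

/* Publish our write-lock intent in the path's lock state first;
 * the cycle detector works from this bookkeeping, not from the lock itself. */
mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_write);

/* Only then take the lock: if the trylock fails and we have to block in
 * __bch2_btree_node_lock_write(), other threads can already see us as a
 * (would-be) writer and detect any resulting lock cycle. */
if (unlikely(!six_trylock_write(&b->lock)))
	__bch2_btree_node_lock_write(trans, b);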
@@ -1163,7 +1163,7 @@ static void bch2_btree_set_root(struct btree_update *as,
 	 * Ensure no one is using the old root while we switch to the
 	 * new root:
 	 */
-	bch2_btree_node_lock_write_nofail(trans, path, old);
+	bch2_btree_node_lock_write_nofail(trans, path, &old->c);
 
 	bch2_btree_set_root_inmem(c, b);
 
@@ -2002,7 +2002,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
 	if (ret)
 		goto err;
 
-	bch2_btree_node_lock_write_nofail(trans, iter->path, b);
+	bch2_btree_node_lock_write_nofail(trans, iter->path, &b->c);
 
 	if (new_hash) {
 		mutex_lock(&c->btree_cache.lock);
@@ -81,7 +81,7 @@ void bch2_btree_node_lock_for_insert(struct btree_trans *trans,
 				     struct btree_path *path,
 				     struct btree *b)
 {
-	bch2_btree_node_lock_write_nofail(trans, path, b);
+	bch2_btree_node_lock_write_nofail(trans, path, &b->c);
 	bch2_btree_node_prep_for_write(trans, path, b);
 }
 