diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 775b3e8468da..f32fc45c85d2 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -147,6 +147,11 @@ int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
 	b->c.level	= level;
 	b->c.btree_id	= id;
 
+	if (level)
+		six_lock_pcpu_alloc(&b->c.lock);
+	else
+		six_lock_pcpu_free_rcu(&b->c.lock);
+
 	mutex_lock(&bc->lock);
 	ret = __bch2_btree_node_hash_insert(bc, b);
 	if (!ret)
@@ -393,6 +398,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 	while (!list_empty(&bc->freed)) {
 		b = list_first_entry(&bc->freed, struct btree, list);
 		list_del(&b->list);
+		six_lock_pcpu_free(&b->c.lock);
 		kfree(b);
 	}
 
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 5f30626d1852..cf41ece0d66e 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -79,11 +79,19 @@ void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
 	 * goes to 0, and it's safe because we have the node intent
 	 * locked:
 	 */
-	atomic64_sub(__SIX_VAL(read_lock, readers),
-		     &b->c.lock.state.counter);
+	if (!b->c.lock.readers)
+		atomic64_sub(__SIX_VAL(read_lock, readers),
+			     &b->c.lock.state.counter);
+	else
+		this_cpu_sub(*b->c.lock.readers, readers);
+
 	btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
-	atomic64_add(__SIX_VAL(read_lock, readers),
-		     &b->c.lock.state.counter);
+
+	if (!b->c.lock.readers)
+		atomic64_add(__SIX_VAL(read_lock, readers),
+			     &b->c.lock.state.counter);
+	else
+		this_cpu_add(*b->c.lock.readers, readers);
 }
 
 bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 4ad8084714f9..2c202dd01766 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -988,6 +988,11 @@ static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
 	list_del_init(&b->list);
 	mutex_unlock(&c->btree_cache.lock);
 
+	if (b->c.level)
+		six_lock_pcpu_alloc(&b->c.lock);
+	else
+		six_lock_pcpu_free(&b->c.lock);
+
 	mutex_lock(&c->btree_root_lock);
 	BUG_ON(btree_node_root(c, b) &&
 	       (b->c.level < btree_node_root(c, b)->c.level ||