bcachefs: Make bch2_btree_cache_scan() try harder

Previously, when bch2_btree_cache_scan() attempted to reclaim a node but
failed (because a trylock failed, because the node was dirty, etc.), it
still counted that node against the number of nodes it was scanning and
attempting to free. This patch changes that behaviour: a node that we fail
to free now only counts against the scan if it has the accessed bit set
(which we also clear).

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
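
To make the counting change concrete, here is a minimal userspace sketch of
the old and new policies. It is only an illustration, not the kernel code:
struct toy_node, scan_old(), scan_new() and the "reclaimable" flag are
invented stand-ins for btree_node_accessed(), btree_node_reclaim() and the
shrinker's nr/freed accounting, and all locking is omitted.

/*
 * Toy model of the shrinker scan counting change (userspace, not kernel code).
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct toy_node {
	bool accessed;		/* recently used: gets a second chance */
	bool reclaimable;	/* clean and unlocked: could be freed now */
};

/* Old policy: every node visited counts against the scan budget nr. */
static unsigned long scan_old(struct toy_node *v, unsigned long n, unsigned long nr)
{
	unsigned long touched = 0, freed = 0, i;

	for (i = 0; i < n; i++) {
		if (++touched >= nr)
			break;

		if (!v[i].accessed && v[i].reclaimable)
			freed++;
		else
			v[i].accessed = false;
	}
	return freed;
}

/*
 * New policy: only nodes left behind with their accessed bit cleared count
 * against nr; nodes we fail to reclaim are skipped without being counted.
 */
static unsigned long scan_new(struct toy_node *v, unsigned long n, unsigned long nr)
{
	unsigned long touched = 0, freed = 0, i;

	for (i = 0; i < n; i++) {
		if (v[i].accessed) {
			v[i].accessed = false;
		} else if (v[i].reclaimable) {
			freed++;
			if (freed == nr)
				break;
			continue;
		} else {
			continue;	/* couldn't reclaim: not charged to nr */
		}

		if (++touched >= nr)
			break;
	}
	return freed;
}

int main(void)
{
	struct toy_node a[8] = {
		{ .accessed = true }, {0}, {0}, { .reclaimable = true },
		{ .reclaimable = true }, { .accessed = true },
		{ .reclaimable = true }, { .reclaimable = true },
	};
	struct toy_node b[8];

	memcpy(b, a, sizeof(a));

	printf("old policy freed %lu, new policy freed %lu\n",
	       scan_old(a, 8, 4), scan_new(b, 8, 4));
	return 0;
}

With the same nodes and the same scan budget, the old policy spends the budget
on nodes it can neither free nor age and stops early, while the new policy only
charges the budget for accessed nodes it leaves behind, so it reaches and frees
more reclaimable nodes.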
Kent Overstreet 2022-03-03 11:04:01 -05:00 committed by Kent Overstreet
parent e0c014e7e4
commit 05a49d2275


@@ -327,17 +327,13 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 	}
 restart:
 	list_for_each_entry_safe(b, t, &bc->live, list) {
-		touched++;
-
-		if (touched >= nr) {
-			/* Save position */
-			if (&t->list != &bc->live)
-				list_move_tail(&bc->live, &t->list);
-			break;
+		/* tweak this */
+		if (btree_node_accessed(b)) {
+			clear_btree_node_accessed(b);
+			goto touched;
 		}
 
-		if (!btree_node_accessed(b) &&
-		    !btree_node_reclaim(c, b)) {
+		if (!btree_node_reclaim(c, b)) {
 			/* can't call bch2_btree_node_hash_remove under lock */
 			freed++;
 			if (&t->list != &bc->live)
@@ -358,8 +354,18 @@ restart:
 			else if (!mutex_trylock(&bc->lock))
 				goto out;
 			goto restart;
-		} else
-			clear_btree_node_accessed(b);
+		} else {
+			continue;
+		}
+touched:
+		touched++;
+
+		if (touched >= nr) {
+			/* Save position */
+			if (&t->list != &bc->live)
+				list_move_tail(&bc->live, &t->list);
+			break;
+		}
 	}
 
 	mutex_unlock(&bc->lock);