bcachefs: Refactor replicas code

A while back the mechanism for garbage collecting unused replicas entries
was significantly improved, but some cleanup was missed - this patch
does that now.

This is also prep work for a patch to account for erasure coded parity
blocks separately - we need to consolidate the logic for
checking/marking the various replicas entries from one bkey into a
single function.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet, 2020-07-10 16:13:52 -04:00 (committed by Kent Overstreet)
parent 8f3b41ab4f
commit 988e98cfce
5 changed files with 33 additions and 72 deletions
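
In outline, the refactor gives the check and mark paths one shared body: a single helper walks the replicas entries derived from a key and either verifies that each is already present in the superblock or marks the missing ones, depending on a flag, and the public functions become thin wrappers around it. The fragment below is only a minimal sketch of that shape using made-up stand-in types and helpers (struct key, entry_is_marked(), mark_entry_slowpath() are illustrative, not bcachefs APIs); the real code is in the hunks around __bch2_mark_replicas() further down.

#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-ins for the real bcachefs types and helpers (illustrative only). */
struct entry { int id; };
struct key   { struct entry entries[4]; size_t nr; };

static bool entry_is_marked(const struct entry *e)     { return e->id >= 0; }
static int  mark_entry_slowpath(const struct entry *e) { (void)e; return 0; }

/* One helper covers both paths: check == true only verifies, check == false marks. */
static int __mark_key_entries(const struct key *k, bool check)
{
	for (size_t i = 0; i < k->nr; i++) {
		const struct entry *e = &k->entries[i];

		if (entry_is_marked(e))
			continue;
		if (check)
			return -1;	/* caller only wanted a yes/no answer */

		int ret = mark_entry_slowpath(e);
		if (ret)
			return ret;
	}
	return 0;
}

/* Thin wrappers, mirroring the shape of bch2_bkey_replicas_marked() / bch2_mark_bkey_replicas(). */
static bool key_entries_marked(const struct key *k) { return __mark_key_entries(k, true) == 0; }
static int  mark_key_entries(const struct key *k)   { return __mark_key_entries(k, false); }

Keeping one walker for both operations is the consolidation the commit message above refers to.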


@@ -111,7 +111,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
 		atomic64_set(&c->key_version, k.k->version.lo);
 
 	if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
-	    fsck_err_on(!bch2_bkey_replicas_marked(c, k, false), c,
+	    fsck_err_on(!bch2_bkey_replicas_marked(c, k), c,
 			"superblock not marked as containing replicas (type %u)",
 			k.k->type)) {
 		ret = bch2_mark_bkey_replicas(c, k);


@@ -178,11 +178,6 @@ void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
 	if (!percpu_down_read_trylock(&c->mark_lock))
 		return;
 
-	bch2_fs_inconsistent_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
-		!bch2_bkey_replicas_marked_locked(c, k, false), c,
-		"btree key bad (replicas not marked in superblock):\n%s",
-		(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
-
 	bkey_for_each_ptr(ptrs, ptr) {
 		ca = bch_dev_bkey_exists(c, ptr->dev);
@@ -266,11 +261,6 @@ void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
 	if (!percpu_down_read_trylock(&c->mark_lock))
 		return;
 
-	bch2_fs_inconsistent_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
-		!bch2_bkey_replicas_marked_locked(c, e.s_c, false), c,
-		"extent key bad (replicas not marked in superblock):\n%s",
-		(bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf));
-
 	extent_for_each_ptr_decode(e, p, entry) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
 		struct bucket_mark mark = ptr_bucket_mark(ca, &p.ptr);


@@ -699,7 +699,7 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list)
 
 		if (!degraded &&
 		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
-		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e, false), c,
+		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
				 "superblock not marked as containing replicas %s",
				 (bch2_replicas_entry_to_text(&PBUF(buf),
							      &replicas.e), buf)))) {


@@ -213,29 +213,20 @@ static bool __replicas_has_entry(struct bch_replicas_cpu *r,
 	return __replicas_entry_idx(r, search) >= 0;
 }
 
-static bool bch2_replicas_marked_locked(struct bch_fs *c,
-			  struct bch_replicas_entry *search,
-			  bool check_gc_replicas)
+bool bch2_replicas_marked(struct bch_fs *c,
+			  struct bch_replicas_entry *search)
 {
+	bool marked;
+
 	if (!search->nr_devs)
 		return true;
 
 	verify_replicas_entry(search);
 
-	return __replicas_has_entry(&c->replicas, search) &&
-		(!check_gc_replicas ||
-		 likely((!c->replicas_gc.entries)) ||
-		 __replicas_has_entry(&c->replicas_gc, search));
-}
-
-bool bch2_replicas_marked(struct bch_fs *c,
-			  struct bch_replicas_entry *search,
-			  bool check_gc_replicas)
-{
-	bool marked;
-
 	percpu_down_read(&c->mark_lock);
-	marked = bch2_replicas_marked_locked(c, search, check_gc_replicas);
+	marked = __replicas_has_entry(&c->replicas, search) &&
+		(likely((!c->replicas_gc.entries)) ||
+		 __replicas_has_entry(&c->replicas_gc, search));
 	percpu_up_read(&c->mark_lock);
 
 	return marked;
@@ -426,49 +417,22 @@ err:
 	goto out;
 }
 
-int bch2_mark_replicas(struct bch_fs *c,
-		       struct bch_replicas_entry *r)
+static int __bch2_mark_replicas(struct bch_fs *c,
+				struct bch_replicas_entry *r,
+				bool check)
 {
-	return likely(bch2_replicas_marked(c, r, true))
-		? 0
+	return likely(bch2_replicas_marked(c, r)) ? 0
+		: check ? -1
 		: bch2_mark_replicas_slowpath(c, r);
 }
 
-bool bch2_bkey_replicas_marked_locked(struct bch_fs *c,
-				      struct bkey_s_c k,
-				      bool check_gc_replicas)
+int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry *r)
 {
-	struct bch_replicas_padded search;
-	struct bch_devs_list cached = bch2_bkey_cached_devs(k);
-	unsigned i;
-
-	for (i = 0; i < cached.nr; i++) {
-		bch2_replicas_entry_cached(&search.e, cached.devs[i]);
-
-		if (!bch2_replicas_marked_locked(c, &search.e,
-						 check_gc_replicas))
-			return false;
-	}
-
-	bch2_bkey_to_replicas(&search.e, k);
-
-	return bch2_replicas_marked_locked(c, &search.e, check_gc_replicas);
+	return __bch2_mark_replicas(c, r, false);
 }
 
-bool bch2_bkey_replicas_marked(struct bch_fs *c,
-			       struct bkey_s_c k,
-			       bool check_gc_replicas)
-{
-	bool marked;
-
-	percpu_down_read(&c->mark_lock);
-	marked = bch2_bkey_replicas_marked_locked(c, k, check_gc_replicas);
-	percpu_up_read(&c->mark_lock);
-
-	return marked;
-}
-
-int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
+static int __bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k,
+				     bool check)
 {
 	struct bch_replicas_padded search;
 	struct bch_devs_list cached = bch2_bkey_cached_devs(k);
@@ -478,14 +442,25 @@ int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
 	for (i = 0; i < cached.nr; i++) {
 		bch2_replicas_entry_cached(&search.e, cached.devs[i]);
 
-		ret = bch2_mark_replicas(c, &search.e);
+		ret = __bch2_mark_replicas(c, &search.e, check);
 		if (ret)
			return ret;
 	}
 
 	bch2_bkey_to_replicas(&search.e, k);
 
-	return bch2_mark_replicas(c, &search.e);
+	return __bch2_mark_replicas(c, &search.e, check);
 }
 
+bool bch2_bkey_replicas_marked(struct bch_fs *c,
+			       struct bkey_s_c k)
+{
+	return __bch2_mark_bkey_replicas(c, k, true) == 0;
+}
+
+int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
+{
+	return __bch2_mark_bkey_replicas(c, k, false);
+}
+
 int bch2_replicas_gc_end(struct bch_fs *c, int ret)

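For callers, the net effect (condensed from the bch2_gc_mark_key() and bch2_journal_read() hunks above; the fsck_err_on()/BCH_FS_REBUILD_REPLICAS plumbing is elided, so this is a sketch rather than verbatim kernel code) is that the check and the mark both take just the filesystem and the key, with no check_gc_replicas flag:

	/*
	 * Mark the key's replicas in the superblock only if the check says
	 * they are missing (sketch of the caller pattern, not verbatim code).
	 */
	if (!bch2_bkey_replicas_marked(c, k))
		ret = bch2_mark_bkey_replicas(c, k);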

@@ -21,16 +21,12 @@ int bch2_replicas_entry_idx(struct bch_fs *,
 void bch2_devlist_to_replicas(struct bch_replicas_entry *,
			      enum bch_data_type,
			      struct bch_devs_list);
-bool bch2_replicas_marked(struct bch_fs *,
-			  struct bch_replicas_entry *, bool);
+bool bch2_replicas_marked(struct bch_fs *, struct bch_replicas_entry *);
 int bch2_mark_replicas(struct bch_fs *,
		       struct bch_replicas_entry *);
 
-bool bch2_bkey_replicas_marked_locked(struct bch_fs *,
-				      struct bkey_s_c, bool);
 void bch2_bkey_to_replicas(struct bch_replicas_entry *, struct bkey_s_c);
-bool bch2_bkey_replicas_marked(struct bch_fs *,
-			       struct bkey_s_c, bool);
+bool bch2_bkey_replicas_marked(struct bch_fs *, struct bkey_s_c);
 int bch2_mark_bkey_replicas(struct bch_fs *, struct bkey_s_c);
 
 static inline void bch2_replicas_entry_cached(struct bch_replicas_entry *e,