bcachefs: Refactor replicas code
A while back the mechanism for garbage collecting unused replicas entries was significantly improved, but some cleanup was missed - this patch does that now.

This is also prep work for a patch to account for erasure coded parity blocks separately - we need to consolidate the logic for checking/marking the various replicas entries from one bkey into a single function.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 8f3b41ab4f
commit 988e98cfce
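The core of the consolidation is visible in the replicas.c hunks below: the "is this replicas entry marked?" check and the "mark it" update now funnel through a single internal helper, with a `check` flag selecting the behaviour. The following is a condensed sketch of that pattern, simplified from the functions in the diff (the real __bch2_mark_bkey_replicas also walks the bkey's cached-device entries, and the helpers assume the bcachefs internal headers):

/*
 * Condensed sketch of the consolidated helper pattern this commit
 * introduces; see the replicas.c hunks below for the full code,
 * which also iterates the key's cached-device replicas entries.
 */
static int __bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k,
				     bool check)
{
	struct bch_replicas_padded search;

	bch2_bkey_to_replicas(&search.e, k);

	/*
	 * check == true:  only report whether the entry is already marked
	 *                 in the superblock (returns -1 if it is not);
	 * check == false: mark it via the slowpath if it is missing.
	 */
	return __bch2_mark_replicas(c, &search.e, check);
}

bool bch2_bkey_replicas_marked(struct bch_fs *c, struct bkey_s_c k)
{
	return __bch2_mark_bkey_replicas(c, k, true) == 0;
}

int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
	return __bch2_mark_bkey_replicas(c, k, false);
}

With the check and mark paths sharing one function that turns a bkey into replicas entries, a later patch can account erasure coded parity blocks separately by changing that single place.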
@@ -111,7 +111,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
 		atomic64_set(&c->key_version, k.k->version.lo);
 
 	if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
-	    fsck_err_on(!bch2_bkey_replicas_marked(c, k, false), c,
+	    fsck_err_on(!bch2_bkey_replicas_marked(c, k), c,
 			"superblock not marked as containing replicas (type %u)",
 			k.k->type)) {
 		ret = bch2_mark_bkey_replicas(c, k);
@@ -178,11 +178,6 @@ void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
 	if (!percpu_down_read_trylock(&c->mark_lock))
 		return;
 
-	bch2_fs_inconsistent_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
-		!bch2_bkey_replicas_marked_locked(c, k, false), c,
-		"btree key bad (replicas not marked in superblock):\n%s",
-		(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
-
 	bkey_for_each_ptr(ptrs, ptr) {
 		ca = bch_dev_bkey_exists(c, ptr->dev);
 
@@ -266,11 +261,6 @@ void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
 	if (!percpu_down_read_trylock(&c->mark_lock))
 		return;
 
-	bch2_fs_inconsistent_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
-		!bch2_bkey_replicas_marked_locked(c, e.s_c, false), c,
-		"extent key bad (replicas not marked in superblock):\n%s",
-		(bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf));
-
 	extent_for_each_ptr_decode(e, p, entry) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
 		struct bucket_mark mark = ptr_bucket_mark(ca, &p.ptr);
@@ -699,7 +699,7 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list)
 
 		if (!degraded &&
 		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
-		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e, false), c,
+		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
				 "superblock not marked as containing replicas %s",
				 (bch2_replicas_entry_to_text(&PBUF(buf),
							      &replicas.e), buf)))) {
@@ -213,29 +213,20 @@ static bool __replicas_has_entry(struct bch_replicas_cpu *r,
 	return __replicas_entry_idx(r, search) >= 0;
 }
 
-static bool bch2_replicas_marked_locked(struct bch_fs *c,
-					struct bch_replicas_entry *search,
-					bool check_gc_replicas)
+bool bch2_replicas_marked(struct bch_fs *c,
+			  struct bch_replicas_entry *search)
 {
+	bool marked;
+
 	if (!search->nr_devs)
 		return true;
 
 	verify_replicas_entry(search);
 
-	return __replicas_has_entry(&c->replicas, search) &&
-		(!check_gc_replicas ||
-		 likely((!c->replicas_gc.entries)) ||
-		 __replicas_has_entry(&c->replicas_gc, search));
-}
-
-bool bch2_replicas_marked(struct bch_fs *c,
-			  struct bch_replicas_entry *search,
-			  bool check_gc_replicas)
-{
-	bool marked;
-
 	percpu_down_read(&c->mark_lock);
-	marked = bch2_replicas_marked_locked(c, search, check_gc_replicas);
+	marked = __replicas_has_entry(&c->replicas, search) &&
+		(likely((!c->replicas_gc.entries)) ||
+		 __replicas_has_entry(&c->replicas_gc, search));
 	percpu_up_read(&c->mark_lock);
 
 	return marked;
@@ -426,49 +417,22 @@ err:
 	goto out;
 }
 
-int bch2_mark_replicas(struct bch_fs *c,
-		       struct bch_replicas_entry *r)
+static int __bch2_mark_replicas(struct bch_fs *c,
+				struct bch_replicas_entry *r,
+				bool check)
 {
-	return likely(bch2_replicas_marked(c, r, true))
-		? 0
+	return likely(bch2_replicas_marked(c, r)) ? 0
+		: check ? -1
 		: bch2_mark_replicas_slowpath(c, r);
 }
 
-bool bch2_bkey_replicas_marked_locked(struct bch_fs *c,
-				      struct bkey_s_c k,
-				      bool check_gc_replicas)
+int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry *r)
 {
-	struct bch_replicas_padded search;
-	struct bch_devs_list cached = bch2_bkey_cached_devs(k);
-	unsigned i;
-
-	for (i = 0; i < cached.nr; i++) {
-		bch2_replicas_entry_cached(&search.e, cached.devs[i]);
-
-		if (!bch2_replicas_marked_locked(c, &search.e,
-						 check_gc_replicas))
-			return false;
-	}
-
-	bch2_bkey_to_replicas(&search.e, k);
-
-	return bch2_replicas_marked_locked(c, &search.e, check_gc_replicas);
+	return __bch2_mark_replicas(c, r, false);
 }
 
-bool bch2_bkey_replicas_marked(struct bch_fs *c,
-			       struct bkey_s_c k,
-			       bool check_gc_replicas)
-{
-	bool marked;
-
-	percpu_down_read(&c->mark_lock);
-	marked = bch2_bkey_replicas_marked_locked(c, k, check_gc_replicas);
-	percpu_up_read(&c->mark_lock);
-
-	return marked;
-}
-
-int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
+static int __bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k,
+				     bool check)
 {
 	struct bch_replicas_padded search;
 	struct bch_devs_list cached = bch2_bkey_cached_devs(k);
@@ -478,14 +442,25 @@ int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
 	for (i = 0; i < cached.nr; i++) {
 		bch2_replicas_entry_cached(&search.e, cached.devs[i]);
 
-		ret = bch2_mark_replicas(c, &search.e);
+		ret = __bch2_mark_replicas(c, &search.e, check);
 		if (ret)
 			return ret;
 	}
 
 	bch2_bkey_to_replicas(&search.e, k);
 
-	return bch2_mark_replicas(c, &search.e);
+	return __bch2_mark_replicas(c, &search.e, check);
+}
+
+bool bch2_bkey_replicas_marked(struct bch_fs *c,
+			       struct bkey_s_c k)
+{
+	return __bch2_mark_bkey_replicas(c, k, true) == 0;
+}
+
+int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
+{
+	return __bch2_mark_bkey_replicas(c, k, false);
 }
 
 int bch2_replicas_gc_end(struct bch_fs *c, int ret)
@@ -21,16 +21,12 @@ int bch2_replicas_entry_idx(struct bch_fs *,
 void bch2_devlist_to_replicas(struct bch_replicas_entry *,
 			      enum bch_data_type,
 			      struct bch_devs_list);
-bool bch2_replicas_marked(struct bch_fs *,
-			  struct bch_replicas_entry *, bool);
+bool bch2_replicas_marked(struct bch_fs *, struct bch_replicas_entry *);
 int bch2_mark_replicas(struct bch_fs *,
 		       struct bch_replicas_entry *);
 
-bool bch2_bkey_replicas_marked_locked(struct bch_fs *,
-				      struct bkey_s_c, bool);
 void bch2_bkey_to_replicas(struct bch_replicas_entry *, struct bkey_s_c);
-bool bch2_bkey_replicas_marked(struct bch_fs *,
-			       struct bkey_s_c, bool);
+bool bch2_bkey_replicas_marked(struct bch_fs *, struct bkey_s_c);
 int bch2_mark_bkey_replicas(struct bch_fs *, struct bkey_s_c);
 
 static inline void bch2_replicas_entry_cached(struct bch_replicas_entry *e,