From 1ffb876fb0f31632b761ee721f633e0d7491ca7b Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Mon, 12 Sep 2022 02:22:47 -0400 Subject: [PATCH] bcachefs: Kill journal_keys->journal_seq_base This removes an optimization that didn't actually save us any memory, due to alignment, but did make the code more complicated than it needed to be. We were also seeing a bug where journal_seq_base wasn't getting correctly initialized, so hopefully it'll fix that too. Signed-off-by: Kent Overstreet --- fs/bcachefs/bcachefs.h | 5 ++--- fs/bcachefs/recovery.c | 14 ++------------ 2 files changed, 4 insertions(+), 15 deletions(-) diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h index c1d96222f4c3..74da688d994b 100644 --- a/fs/bcachefs/bcachefs.h +++ b/fs/bcachefs/bcachefs.h @@ -555,13 +555,13 @@ struct journal_seq_blacklist_table { struct journal_keys { struct journal_key { + u64 journal_seq; + u32 journal_offset; enum btree_id btree_id:8; unsigned level:8; bool allocated; bool overwritten; struct bkey_i *k; - u32 journal_seq; - u32 journal_offset; } *d; /* * Gap buffer: instead of all the empty space in the array being at the @@ -571,7 +571,6 @@ struct journal_keys { size_t gap; size_t nr; size_t size; - u64 journal_seq_base; }; struct btree_path_buf { diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c index 2cf347530b65..ea8cc636a9e0 100644 --- a/fs/bcachefs/recovery.c +++ b/fs/bcachefs/recovery.c @@ -222,7 +222,6 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id, struct journal_keys new_keys = { .nr = keys->nr, .size = max_t(size_t, keys->size, 8) * 2, - .journal_seq_base = keys->journal_seq_base, }; new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL); @@ -493,9 +492,6 @@ static int journal_keys_sort(struct bch_fs *c) if (!i || i->ignore) continue; - if (!keys->journal_seq_base) - keys->journal_seq_base = le64_to_cpu(i->j.seq); - for_each_jset_key(k, _n, entry, &i->j) nr_keys++; } @@ -515,15 +511,12 @@ static int 
journal_keys_sort(struct bch_fs *c) if (!i || i->ignore) continue; - BUG_ON(le64_to_cpu(i->j.seq) - keys->journal_seq_base > U32_MAX); - for_each_jset_key(k, _n, entry, &i->j) keys->d[keys->nr++] = (struct journal_key) { .btree_id = entry->btree_id, .level = entry->level, .k = k, - .journal_seq = le64_to_cpu(i->j.seq) - - keys->journal_seq_base, + .journal_seq = le64_to_cpu(i->j.seq), .journal_offset = k->_data - i->j._data, }; } @@ -617,15 +610,12 @@ static int bch2_journal_replay(struct bch_fs *c) sizeof(keys_sorted[0]), journal_sort_seq_cmp, NULL); - if (keys->nr) - replay_now_at(j, keys->journal_seq_base); - for (i = 0; i < keys->nr; i++) { k = keys_sorted[i]; cond_resched(); - replay_now_at(j, keys->journal_seq_base + k->journal_seq); + replay_now_at(j, k->journal_seq); ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW|