bcachefs: Kill bch2_invalidate_bucket()
This patch works towards eventually getting rid of the in-memory struct bucket, relying only on the btree representation. Since bch2_invalidate_bucket() was only used for incrementing gens, not for invalidating cached data, no other counters were being changed as a side effect — meaning it's safe for the allocator code to increment the bucket gen directly. Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com> Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
parent
72eab8da47
commit
9afc6652d1
@@ -896,33 +896,31 @@ static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
|
||||
|
||||
/* first, put on free_inc and mark as owned by allocator: */
|
||||
percpu_down_read(&c->mark_lock);
|
||||
spin_lock(&c->freelist_lock);
|
||||
|
||||
verify_not_on_freelist(c, ca, b);
|
||||
|
||||
BUG_ON(!fifo_push(&ca->free_inc, b));
|
||||
|
||||
g = bucket(ca, b);
|
||||
m = READ_ONCE(g->mark);
|
||||
|
||||
invalidating_cached_data = m.cached_sectors != 0;
|
||||
BUG_ON(m.data_type || m.dirty_sectors);
|
||||
|
||||
bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
|
||||
|
||||
spin_lock(&c->freelist_lock);
|
||||
verify_not_on_freelist(c, ca, b);
|
||||
BUG_ON(!fifo_push(&ca->free_inc, b));
|
||||
spin_unlock(&c->freelist_lock);
|
||||
|
||||
/*
|
||||
* If we're not invalidating cached data, we only increment the bucket
|
||||
* gen in memory here, the incremented gen will be updated in the btree
|
||||
* by bch2_trans_mark_pointer():
|
||||
*/
|
||||
|
||||
if (!invalidating_cached_data)
|
||||
bch2_invalidate_bucket(c, ca, b, &m);
|
||||
else
|
||||
bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
|
||||
|
||||
spin_unlock(&c->freelist_lock);
|
||||
percpu_up_read(&c->mark_lock);
|
||||
|
||||
if (!invalidating_cached_data)
|
||||
if (!m.cached_sectors &&
|
||||
!bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
|
||||
bucket_cmpxchg(g, m, m.gen++);
|
||||
percpu_up_read(&c->mark_lock);
|
||||
goto out;
|
||||
}
|
||||
|
||||
percpu_up_read(&c->mark_lock);
|
||||
|
||||
/*
|
||||
* If the read-only path is trying to shut down, we can't be generating
|
||||
|
@@ -644,46 +644,6 @@ unwind:
|
||||
ret; \
|
||||
})
|
||||
|
||||
/*
 * Invalidate one bucket: atomically hand it to the allocator, zero its
 * sector/data-type counts, and bump its generation number.
 *
 * @c:   filesystem instance
 * @ca:  device the bucket lives on
 * @b:   bucket index on @ca
 * @ret: out-param; receives the bucket's mark from before the update
 *       (only written when !gc)
 * @gc:  operate on the GC copy of the bucket array (selects which
 *       fs_usage / bucket array is updated)
 *
 * The update is performed inside bucket_cmpxchg, so the whole
 * read-modify-write of the mark is a single compare-and-swap
 * (presumably retried on contention — see the macro's definition).
 *
 * Always returns 0; the int return exists to fit do_mark_fn's
 * callback shape.
 */
static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
				    size_t b, struct bucket_mark *ret,
				    bool gc)
{
	struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
	struct bucket *g = __bucket(ca, b, gc);
	struct bucket_mark old, new;

	old = bucket_cmpxchg(g, new, ({
		/* Only an available bucket may be invalidated: */
		BUG_ON(!is_available_bucket(new));

		new.owned_by_allocator	= true;
		new.data_type		= 0;
		new.cached_sectors	= 0;
		new.dirty_sectors	= 0;
		new.gen++;
	}));

	/* Propagate the old->new delta into per-device usage accounting: */
	bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);

	/* Any cached sectors the bucket held are now gone — subtract them: */
	if (old.cached_sectors)
		update_cached_sectors(c, fs_usage, ca->dev_idx,
				      -((s64) old.cached_sectors));

	/* Report the pre-update mark to the caller (non-GC path only): */
	if (!gc)
		*ret = old;
	return 0;
}
|
||||
|
||||
/*
 * Public wrapper around __bch2_invalidate_bucket(): runs it via
 * do_mark_fn at GC_PHASE_START, then emits a tracepoint if the
 * invalidation actually threw away cached data.
 *
 * @c:   filesystem instance
 * @ca:  device the bucket lives on
 * @b:   bucket index on @ca
 * @old: out-param; receives the bucket's mark from before the update
 */
void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
			    size_t b, struct bucket_mark *old)
{
	do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
		   ca, b, old);

	/*
	 * Trace only when the bucket wasn't already owned by the allocator
	 * and it held cached sectors that were just discarded:
	 */
	if (!old->owned_by_allocator && old->cached_sectors)
		trace_invalidate(ca, bucket_to_sector(ca, b),
				 old->cached_sectors);
}
|
||||
|
||||
static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
|
||||
size_t b, bool owned_by_allocator,
|
||||
bool gc)
|
||||
|
@@ -236,8 +236,6 @@ bch2_fs_usage_read_short(struct bch_fs *);
|
||||
void bch2_bucket_seq_cleanup(struct bch_fs *);
|
||||
void bch2_fs_usage_initialize(struct bch_fs *);
|
||||
|
||||
void bch2_invalidate_bucket(struct bch_fs *, struct bch_dev *,
|
||||
size_t, struct bucket_mark *);
|
||||
void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *,
|
||||
size_t, bool, struct gc_pos, unsigned);
|
||||
void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
|
||||
|
Loading…
Reference in New Issue
Block a user