bcachefs: PTR_BUCKET_POS() now takes bch_dev

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

Author: Kent Overstreet <kent.overstreet@linux.dev>
Date:   2024-04-30 19:34:28 -04:00
Parent: fa6cce09f0
Commit: 1f2f92ec3f

8 changed files with 57 additions and 49 deletions
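
In short: helpers that previously took a struct bch_fs * and looked the device up internally now take the struct bch_dev * directly, so the lookup happens once, at the call site. A condensed before/after sketch of the pattern applied throughout the hunks below (illustrative, not a verbatim excerpt):

	/* before: device resolved inside the helper */
	struct bpos b = PTR_BUCKET_POS(c, ptr);

	/* after: the caller resolves the device and passes it in; callers that
	 * already hold a ref (e.g. from bch2_dev_tryget()) skip the lookup */
	struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
	struct bpos b = PTR_BUCKET_POS(ca, ptr);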

fs/bcachefs/backpointers.c

@@ -30,7 +30,9 @@ static bool extent_matches_bp(struct bch_fs *c,
 		if (p.ptr.cached)
 			continue;

-		bch2_extent_ptr_to_bp(c, btree_id, level, k, p, entry, &bucket2, &bp2);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, p.ptr.dev);
+		bch2_extent_ptr_to_bp(c, ca, btree_id, level, k, p, entry, &bucket2, &bp2);
+
 		if (bpos_eq(bucket, bucket2) &&
 		    !memcmp(&bp, &bp2, sizeof(bp)))
 			return true;
@@ -666,7 +668,8 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
 		if (p.ptr.cached)
 			continue;

-		bch2_extent_ptr_to_bp(c, btree, level, k, p, entry, &bucket_pos, &bp);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, p.ptr.dev);
+		bch2_extent_ptr_to_bp(c, ca, btree, level, k, p, entry, &bucket_pos, &bp);

 		ret = check_bp_exists(trans, s, bucket_pos, bp, k);
 		if (ret)

fs/bcachefs/backpointers.h

@@ -120,7 +120,7 @@ static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k,
 	}
 }

-static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
+static inline void bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
 			   enum btree_id btree_id, unsigned level,
 			   struct bkey_s_c k, struct extent_ptr_decoded p,
 			   const union bch_extent_entry *entry,
@@ -130,7 +130,7 @@ static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
 	s64 sectors = level ? btree_sectors(c) : k.k->size;
 	u32 bucket_offset;

-	*bucket_pos = PTR_BUCKET_POS_OFFSET(c, &p.ptr, &bucket_offset);
+	*bucket_pos = PTR_BUCKET_POS_OFFSET(ca, &p.ptr, &bucket_offset);
 	*bp = (struct bch_backpointer) {
 		.btree_id	= btree_id,
 		.level		= level,

fs/bcachefs/buckets.c

@@ -977,7 +977,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
 	struct bpos bucket;
 	struct bch_backpointer bp;
-	bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, entry, &bucket, &bp);
+	bch2_extent_ptr_to_bp(trans->c, ca, btree_id, level, k, p, entry, &bucket, &bp);
 	*sectors = insert ? bp.bucket_len : -((s64) bp.bucket_len);

 	if (flags & BTREE_TRIGGER_transactional) {

fs/bcachefs/buckets.h

@@ -120,20 +120,16 @@ static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
 	return sector_to_bucket(ca, ptr->offset);
 }

-static inline struct bpos PTR_BUCKET_POS(const struct bch_fs *c,
-					 const struct bch_extent_ptr *ptr)
+static inline struct bpos PTR_BUCKET_POS(const struct bch_dev *ca,
+					 const struct bch_extent_ptr *ptr)
 {
-	struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
-
 	return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
 }

-static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_fs *c,
+static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_dev *ca,
 						const struct bch_extent_ptr *ptr,
 						u32 *bucket_offset)
 {
-	struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
-
 	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
 }
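
For context: PTR_BUCKET_POS() turns a pointer's device and sector offset into the bpos used to index per-bucket metadata (see the BTREE_ID_alloc iterator in io_read.c below), with the device index as the inode field and the bucket number as the offset field. A self-contained toy model of that mapping, assuming sector_to_bucket() reduces to a plain division by the device's bucket size (the toy_* names are hypothetical stand-ins for struct bch_dev, struct bch_extent_ptr and struct bpos):

	#include <stdint.h>
	#include <stdio.h>

	struct toy_dev { uint64_t bucket_size; };		/* bucket size in sectors */
	struct toy_ptr { uint32_t dev; uint64_t offset; };	/* offset in sectors */
	struct toy_pos { uint64_t inode, offset; };

	static struct toy_pos toy_ptr_bucket_pos(const struct toy_dev *ca,
						 const struct toy_ptr *ptr)
	{
		/* simplified sector_to_bucket(): bucket = sector / bucket_size */
		return (struct toy_pos) { ptr->dev, ptr->offset / ca->bucket_size };
	}

	int main(void)
	{
		struct toy_dev ca = { .bucket_size = 1024 };		/* 512 KiB buckets */
		struct toy_ptr p = { .dev = 2, .offset = 5000 };	/* sector 5000 */
		struct toy_pos b = toy_ptr_bucket_pos(&ca, &p);

		/* prints "bucket pos: dev 2 bucket 4" */
		printf("bucket pos: dev %llu bucket %llu\n",
		       (unsigned long long) b.inode,
		       (unsigned long long) b.offset);
		return 0;
	}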

fs/bcachefs/data_update.c

@@ -357,10 +357,11 @@ void bch2_data_update_exit(struct data_update *update)
 		bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));

 	bkey_for_each_ptr(ptrs, ptr) {
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
 		if (c->opts.nocow_enabled)
 			bch2_bucket_nocow_unlock(&c->nocow_locks,
-						 PTR_BUCKET_POS(c, ptr), 0);
-		bch2_dev_put(bch2_dev_bkey_exists(c, ptr->dev));
+						 PTR_BUCKET_POS(ca, ptr), 0);
+		bch2_dev_put(ca);
 	}

 	bch2_bkey_buf_exit(&update->k, c);
@@ -547,6 +548,8 @@ int bch2_data_update_init(struct btree_trans *trans,
 	i = 0;
 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, p.ptr.dev);
+		struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);
 		bool locked;

 		if (((1U << i) & m->data_opts.rewrite_ptrs)) {
@@ -580,15 +583,13 @@ int bch2_data_update_init(struct btree_trans *trans,
 		if (ctxt) {
 			move_ctxt_wait_event(ctxt,
 				(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
-							  PTR_BUCKET_POS(c, &p.ptr), 0)) ||
+							  bucket, 0)) ||
 				list_empty(&ctxt->ios));

 			if (!locked)
-				bch2_bucket_nocow_lock(&c->nocow_locks,
-						       PTR_BUCKET_POS(c, &p.ptr), 0);
+				bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0);
 		} else {
-			if (!bch2_bucket_nocow_trylock(&c->nocow_locks,
-						       PTR_BUCKET_POS(c, &p.ptr), 0)) {
+			if (!bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) {
 				ret = -BCH_ERR_nocow_lock_blocked;
 				goto err;
 			}
@@ -650,10 +651,11 @@ int bch2_data_update_init(struct btree_trans *trans,
 err:
 	i = 0;
 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, p.ptr.dev);
+		struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);
 		if ((1U << i) & ptrs_locked)
-			bch2_bucket_nocow_unlock(&c->nocow_locks,
-						 PTR_BUCKET_POS(c, &p.ptr), 0);
-		bch2_dev_put(bch2_dev_bkey_exists(c, p.ptr.dev));
+			bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
+		bch2_dev_put(ca);
 		i++;
 	}
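
Note the shape of the loops in bch2_data_update_init() after this change: the device and bucket position are computed once at the top of each pointer loop, and the nocow trylock/lock/unlock call sites all reuse the same local instead of re-evaluating PTR_BUCKET_POS(). Roughly (a sketch abstracting the surrounding logic, not a verbatim excerpt):

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bch_dev *ca = bch2_dev_bkey_exists(c, p.ptr.dev);
		struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);

		/* every nocow lock call below takes 'bucket' directly */
	}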

fs/bcachefs/ec.c

@@ -164,6 +164,7 @@ void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
 /* Triggers: */

 static int __mark_stripe_bucket(struct btree_trans *trans,
+				struct bch_dev *ca,
 				struct bkey_s_c_stripe s,
 				unsigned ptr_idx, bool deleting,
 				struct bpos bucket,
@@ -179,13 +180,6 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
 	int ret = 0;
 	struct bch_fs *c = trans->c;

-	struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev);
-	if (unlikely(!ca)) {
-		if (!(flags & BTREE_TRIGGER_overwrite))
-			ret = -EIO;
-		goto err;
-	}
-
 	if (deleting)
 		sectors = -sectors;
@@ -263,7 +257,6 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
 	alloc_data_type_set(a, data_type);
 err:
-	bch2_dev_put(ca);
 	printbuf_exit(&buf);
 	return ret;
 }
@@ -275,34 +268,40 @@ static int mark_stripe_bucket(struct btree_trans *trans,
 {
 	struct bch_fs *c = trans->c;
 	const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
-	struct bpos bucket = PTR_BUCKET_POS(c, ptr);
+	int ret = 0;
+
+	struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev);
+	if (unlikely(!ca)) {
+		if (!(flags & BTREE_TRIGGER_overwrite))
+			ret = -EIO;
+		goto err;
+	}
+
+	struct bpos bucket = PTR_BUCKET_POS(ca, ptr);

 	if (flags & BTREE_TRIGGER_transactional) {
 		struct bkey_i_alloc_v4 *a =
 			bch2_trans_start_alloc_update(trans, bucket);
-		return PTR_ERR_OR_ZERO(a) ?:
-			__mark_stripe_bucket(trans, s, ptr_idx, deleting, bucket, &a->v, flags);
+		ret = PTR_ERR_OR_ZERO(a) ?:
+			__mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &a->v, flags);
 	}

 	if (flags & BTREE_TRIGGER_gc) {
-		struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
-
 		percpu_down_read(&c->mark_lock);
 		struct bucket *g = gc_bucket(ca, bucket.offset);
 		bucket_lock(g);
 		struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
-		int ret = __mark_stripe_bucket(trans, s, ptr_idx, deleting, bucket, &new, flags);
+		ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags);
 		if (!ret) {
 			alloc_to_bucket(g, new);
 			bch2_dev_usage_update(c, ca, &old, &new, 0, true);
 		}
 		bucket_unlock(g);
 		percpu_up_read(&c->mark_lock);
-		return ret;
 	}
-
-	BUG();
-	return 0;
+err:
+	bch2_dev_put(ca);
+	return ret;
 }

 static int mark_stripe_buckets(struct btree_trans *trans,
@@ -1298,17 +1297,21 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b
 {
 	struct bch_fs *c = trans->c;
 	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
-	struct bch_extent_ptr bucket = v->ptrs[block];
-	struct bpos bucket_pos = PTR_BUCKET_POS(c, &bucket);
+	struct bch_extent_ptr ptr = v->ptrs[block];
 	struct bpos bp_pos = POS_MIN;
 	int ret = 0;

+	struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev);
+	if (!ca)
+		return -EIO;
+
+	struct bpos bucket_pos = PTR_BUCKET_POS(ca, &ptr);
+
 	while (1) {
 		ret = commit_do(trans, NULL, NULL,
 				BCH_TRANS_COMMIT_no_check_rw|
 				BCH_TRANS_COMMIT_no_enospc,
-			ec_stripe_update_extent(trans, bucket_pos, bucket.gen,
-						s, &bp_pos));
+			ec_stripe_update_extent(trans, bucket_pos, ptr.gen, s, &bp_pos));
 		if (ret)
 			break;
 		if (bkey_eq(bp_pos, POS_MAX))
@@ -1317,6 +1320,7 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b
 		bp_pos = bpos_nosnap_successor(bp_pos);
 	}

+	bch2_dev_put(ca);
 	return ret;
 }
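
The stripe paths also move device-ref management up a level: __mark_stripe_bucket() no longer takes its own reference, and its callers bracket the work with bch2_dev_tryget()/bch2_dev_put(). The pattern, as it appears in ec_stripe_update_bucket() above (error handling elided to its simplest form):

	struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev);
	if (!ca)
		return -EIO;	/* device has gone away */

	struct bpos bucket_pos = PTR_BUCKET_POS(ca, &ptr);

	/* ... work that needs the device to stay alive ... */

	bch2_dev_put(ca);
	return ret;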

fs/bcachefs/io_read.c

@@ -768,7 +768,7 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
 	int ret;

 	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
-			     PTR_BUCKET_POS(c, &ptr),
+			     PTR_BUCKET_POS(ca, &ptr),
 			     BTREE_ITER_cached);

 	prt_printf(&buf, "Attempting to read from stale dirty pointer:\n");

fs/bcachefs/io_write.c

@@ -1117,10 +1117,12 @@ static inline void bch2_nocow_write_unlock(struct bch_write_op *op)
 	for_each_keylist_key(&op->insert_keys, k) {
 		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));

-		bkey_for_each_ptr(ptrs, ptr)
+		bkey_for_each_ptr(ptrs, ptr) {
+			struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
 			bch2_bucket_nocow_unlock(&c->nocow_locks,
-						 PTR_BUCKET_POS(c, ptr),
+						 PTR_BUCKET_POS(ca, ptr),
 						 BUCKET_NOCOW_LOCK_UPDATE);
+		}
 	}
 }
@@ -1270,12 +1272,13 @@ retry:
 	/* Get iorefs before dropping btree locks: */
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 	bkey_for_each_ptr(ptrs, ptr) {
-		struct bpos b = PTR_BUCKET_POS(c, ptr);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
+		struct bpos b = PTR_BUCKET_POS(ca, ptr);
 		struct nocow_lock_bucket *l =
 			bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
 		prefetch(l);

-		if (unlikely(!bch2_dev_get_ioref(bch2_dev_bkey_exists(c, ptr->dev), WRITE)))
+		if (unlikely(!bch2_dev_get_ioref(ca, WRITE)))
 			goto err_get_ioref;

 		/* XXX allocating memory with btree locks held - rare */