bcachefs: Kill BTREE_INSERT_USE_RESERVE
Now that we have journal watermarks and alloc watermarks unified, BTREE_INSERT_USE_RESERVE is redundant and can be deleted.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
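For context, here is a minimal standalone sketch of the unified flag layout this commit relies on: the watermark occupies the low BCH_WATERMARK_BITS of the transaction-commit flags word, and the BTREE_INSERT_* bits start immediately above it, so callers now OR a BCH_WATERMARK_* value into the flags instead of passing BTREE_INSERT_USE_RESERVE. The bit width and enum values below are illustrative assumptions, not the actual bcachefs headers.

/* Illustrative sketch only -- names mirror the diff, values are assumed. */
#include <stdio.h>

enum bch_watermark {			/* hypothetical ordering/values */
	BCH_WATERMARK_normal,
	BCH_WATERMARK_copygc,
	BCH_WATERMARK_btree,
	BCH_WATERMARK_btree_copygc,
	BCH_WATERMARK_reclaim,
};

#define BCH_WATERMARK_BITS	3	/* assumed field width */
#define BCH_WATERMARK_MASK	((1U << BCH_WATERMARK_BITS) - 1)

enum btree_insert_flags {
	/* flag bits start above the watermark field: */
	__BTREE_INSERT_NOFAIL = BCH_WATERMARK_BITS,
	__BTREE_INSERT_NOCHECK_RW,
	__BTREE_INSERT_JOURNAL_RECLAIM,
};

#define BTREE_INSERT_NOFAIL		(1U << __BTREE_INSERT_NOFAIL)
#define BTREE_INSERT_NOCHECK_RW		(1U << __BTREE_INSERT_NOCHECK_RW)
#define BTREE_INSERT_JOURNAL_RECLAIM	(1U << __BTREE_INSERT_JOURNAL_RECLAIM)

int main(void)
{
	/* A caller requests the btree watermark directly in the commit flags: */
	unsigned flags = BCH_WATERMARK_btree|BTREE_INSERT_NOFAIL;

	/* The allocator recovers the watermark with the mask, which is what
	 * makes a separate USE_RESERVE bit unnecessary: */
	unsigned watermark = flags & BCH_WATERMARK_MASK;

	printf("watermark %u, nofail %u\n",
	       watermark, !!(flags & BTREE_INSERT_NOFAIL));
	return 0;
}

With the watermark carried in the commit flags, paths such as __bch2_btree_node_alloc() and bch2_btree_update_start() can derive their allocation behaviour from flags & BCH_WATERMARK_MASK alone, as the hunks below show.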
@@ -1719,7 +1719,8 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
 write:
	ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
-				  BTREE_INSERT_USE_RESERVE|BTREE_INSERT_NOFAIL);
+				  BCH_WATERMARK_btree|
+				  BTREE_INSERT_NOFAIL);
	if (ret)
		goto out;
@@ -1827,7 +1828,8 @@ static int invalidate_one_bucket(struct btree_trans *trans,
	ret = bch2_trans_update(trans, &alloc_iter, &a->k_i,
				BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
		bch2_trans_commit(trans, NULL, NULL,
-				  BTREE_INSERT_USE_RESERVE|BTREE_INSERT_NOFAIL);
+				  BCH_WATERMARK_btree|
+				  BTREE_INSERT_NOFAIL);
	if (ret)
		goto out;
@@ -1766,7 +1766,11 @@ static void btree_node_write_work(struct work_struct *work)
	} else {
		ret = bch2_trans_do(c, NULL, NULL, 0,
			bch2_btree_node_update_key_get_iter(&trans, b, &wbio->key,
-					!wbio->wbio.failed.nr));
+					BCH_WATERMARK_reclaim|
+					BTREE_INSERT_JOURNAL_RECLAIM|
+					BTREE_INSERT_NOFAIL|
+					BTREE_INSERT_NOCHECK_RW,
+					!wbio->wbio.failed.nr));
		if (ret)
			goto err;
	}
@@ -650,7 +650,6 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
		bch2_trans_commit(trans, NULL, NULL,
				  BTREE_INSERT_NOCHECK_RW|
				  BTREE_INSERT_NOFAIL|
-				  BTREE_INSERT_USE_RESERVE|
				  (ck->journal.seq == journal_last_seq(j)
				   ? BCH_WATERMARK_reclaim
				   : 0)|
@@ -27,7 +27,6 @@ enum btree_insert_flags {
	__BTREE_INSERT_NOFAIL = BCH_WATERMARK_BITS,
	__BTREE_INSERT_NOCHECK_RW,
	__BTREE_INSERT_LAZY_RW,
-	__BTREE_INSERT_USE_RESERVE,
	__BTREE_INSERT_JOURNAL_REPLAY,
	__BTREE_INSERT_JOURNAL_RECLAIM,
	__BTREE_INSERT_NOWAIT,
@@ -37,26 +36,23 @@ enum btree_insert_flags {
 };
 
 /* Don't check for -ENOSPC: */
-#define BTREE_INSERT_NOFAIL		(1 << __BTREE_INSERT_NOFAIL)
+#define BTREE_INSERT_NOFAIL		BIT(__BTREE_INSERT_NOFAIL)
 
-#define BTREE_INSERT_NOCHECK_RW		(1 << __BTREE_INSERT_NOCHECK_RW)
-#define BTREE_INSERT_LAZY_RW		(1 << __BTREE_INSERT_LAZY_RW)
-
-/* for copygc, or when merging btree nodes */
-#define BTREE_INSERT_USE_RESERVE	(1 << __BTREE_INSERT_USE_RESERVE)
+#define BTREE_INSERT_NOCHECK_RW		BIT(__BTREE_INSERT_NOCHECK_RW)
+#define BTREE_INSERT_LAZY_RW		BIT(__BTREE_INSERT_LAZY_RW)
 
 /* Insert is for journal replay - don't get journal reservations: */
-#define BTREE_INSERT_JOURNAL_REPLAY	(1 << __BTREE_INSERT_JOURNAL_REPLAY)
+#define BTREE_INSERT_JOURNAL_REPLAY	BIT(__BTREE_INSERT_JOURNAL_REPLAY)
 
 /* Insert is being called from journal reclaim path: */
-#define BTREE_INSERT_JOURNAL_RECLAIM	(1 << __BTREE_INSERT_JOURNAL_RECLAIM)
+#define BTREE_INSERT_JOURNAL_RECLAIM	BIT(__BTREE_INSERT_JOURNAL_RECLAIM)
 
 /* Don't block on allocation failure (for new btree nodes: */
-#define BTREE_INSERT_NOWAIT		(1 << __BTREE_INSERT_NOWAIT)
-#define BTREE_INSERT_GC_LOCK_HELD	(1 << __BTREE_INSERT_GC_LOCK_HELD)
+#define BTREE_INSERT_NOWAIT		BIT(__BTREE_INSERT_NOWAIT)
+#define BTREE_INSERT_GC_LOCK_HELD	BIT(__BTREE_INSERT_GC_LOCK_HELD)
 
-#define BCH_HASH_SET_MUST_CREATE	(1 << __BCH_HASH_SET_MUST_CREATE)
-#define BCH_HASH_SET_MUST_REPLACE	(1 << __BCH_HASH_SET_MUST_REPLACE)
+#define BCH_HASH_SET_MUST_CREATE	BIT(__BCH_HASH_SET_MUST_CREATE)
+#define BCH_HASH_SET_MUST_REPLACE	BIT(__BCH_HASH_SET_MUST_REPLACE)
 
 int bch2_btree_delete_extent_at(struct btree_trans *, struct btree_iter *,
				unsigned, unsigned);
@@ -80,9 +76,10 @@ int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
			    struct btree *, unsigned);
 void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
 int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *,
-			       struct btree *, struct bkey_i *, bool);
-int bch2_btree_node_update_key_get_iter(struct btree_trans *,
-					struct btree *, struct bkey_i *, bool);
+			       struct btree *, struct bkey_i *,
+			       unsigned, bool);
+int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *,
+					struct bkey_i *, unsigned, bool);
 
 int __bch2_insert_snapshot_whiteouts(struct btree_trans *, enum btree_id,
				     struct bpos, struct bpos);
|
@ -246,18 +246,12 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
|
||||
BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
|
||||
struct open_buckets ob = { .nr = 0 };
|
||||
struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
|
||||
unsigned nr_reserve;
|
||||
enum bch_watermark alloc_reserve;
|
||||
enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
|
||||
unsigned nr_reserve = watermark > BCH_WATERMARK_reclaim
|
||||
? BTREE_NODE_RESERVE
|
||||
: 0;
|
||||
int ret;
|
||||
|
||||
if (flags & BTREE_INSERT_USE_RESERVE) {
|
||||
nr_reserve = 0;
|
||||
alloc_reserve = BCH_WATERMARK_btree_copygc;
|
||||
} else {
|
||||
nr_reserve = BTREE_NODE_RESERVE;
|
||||
alloc_reserve = BCH_WATERMARK_btree;
|
||||
}
|
||||
|
||||
mutex_lock(&c->btree_reserve_cache_lock);
|
||||
if (c->btree_reserve_cache_nr > nr_reserve) {
|
||||
struct btree_alloc *a =
|
||||
@@ -279,7 +273,7 @@ retry:
				      &devs_have,
				      res->nr_replicas,
				      c->opts.metadata_replicas_required,
-				      alloc_reserve, 0, cl, &wp);
+				      watermark, 0, cl, &wp);
	if (unlikely(ret))
		return ERR_PTR(ret);
 
@@ -647,11 +641,10 @@ static void btree_update_nodes_written(struct btree_update *as)
	 * which may require allocations as well.
	 */
	ret = commit_do(&trans, &as->disk_res, &journal_seq,
+			BCH_WATERMARK_reclaim|
			BTREE_INSERT_NOFAIL|
			BTREE_INSERT_NOCHECK_RW|
-			BTREE_INSERT_USE_RESERVE|
-			BTREE_INSERT_JOURNAL_RECLAIM|
-			BCH_WATERMARK_reclaim,
+			BTREE_INSERT_JOURNAL_RECLAIM,
			btree_update_nodes_written_trans(&trans, as));
	bch2_trans_unlock(&trans);
 
@@ -1049,14 +1042,24 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
		? BCH_DISK_RESERVATION_NOFAIL : 0;
	unsigned nr_nodes[2] = { 0, 0 };
	unsigned update_level = level;
-	int journal_flags = flags & BCH_WATERMARK_MASK;
+	enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
+	unsigned journal_flags = 0;
	int ret = 0;
	u32 restart_count = trans->restart_count;
 
	BUG_ON(!path->should_be_locked);
 
+	if (watermark == BCH_WATERMARK_copygc)
+		watermark = BCH_WATERMARK_btree_copygc;
+	if (watermark < BCH_WATERMARK_btree)
+		watermark = BCH_WATERMARK_btree;
+
+	flags &= ~BCH_WATERMARK_MASK;
+	flags |= watermark;
+
	if (flags & BTREE_INSERT_JOURNAL_RECLAIM)
		journal_flags |= JOURNAL_RES_GET_NONBLOCK;
+	journal_flags |= watermark;
 
	while (1) {
		nr_nodes[!!update_level] += 1 + split;
@@ -1845,9 +1848,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 
	parent = btree_node_parent(path, b);
	as = bch2_btree_update_start(trans, path, level, false,
-				     BTREE_INSERT_NOFAIL|
-				     BTREE_INSERT_USE_RESERVE|
-				     flags);
+				     BTREE_INSERT_NOFAIL|flags);
	ret = PTR_ERR_OR_ZERO(as);
	if (ret)
		goto err;
@@ -2127,6 +2128,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
					struct btree_iter *iter,
					struct btree *b, struct btree *new_hash,
					struct bkey_i *new_key,
+					unsigned commit_flags,
					bool skip_triggers)
 {
	struct bch_fs *c = trans->c;
@@ -2187,12 +2189,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
		trans->extra_journal_entries.nr += jset_u64s(new_key->k.u64s);
	}
 
-	ret = bch2_trans_commit(trans, NULL, NULL,
-				BTREE_INSERT_NOFAIL|
-				BTREE_INSERT_NOCHECK_RW|
-				BTREE_INSERT_USE_RESERVE|
-				BTREE_INSERT_JOURNAL_RECLAIM|
-				BCH_WATERMARK_reclaim);
+	ret = bch2_trans_commit(trans, NULL, NULL, commit_flags);
	if (ret)
		goto err;
 
@@ -2226,7 +2223,7 @@ err:
 
 int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter,
			       struct btree *b, struct bkey_i *new_key,
-			       bool skip_triggers)
+			       unsigned commit_flags, bool skip_triggers)
 {
	struct bch_fs *c = trans->c;
	struct btree *new_hash = NULL;
@@ -2256,8 +2253,8 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
	}
 
	path->intent_ref++;
-	ret = __bch2_btree_node_update_key(trans, iter, b, new_hash,
-					   new_key, skip_triggers);
+	ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, new_key,
+					   commit_flags, skip_triggers);
	--path->intent_ref;
 
	if (new_hash) {
@@ -2275,7 +2272,7 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
 
 int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
					struct btree *b, struct bkey_i *new_key,
-					bool skip_triggers)
+					unsigned commit_flags, bool skip_triggers)
 {
	struct btree_iter iter;
	int ret;
@@ -2296,7 +2293,8 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
 
	BUG_ON(!btree_node_hashed(b));
 
-	ret = bch2_btree_node_update_key(trans, &iter, b, new_key, skip_triggers);
+	ret = bch2_btree_node_update_key(trans, &iter, b, new_key,
+					 commit_flags, skip_triggers);
 out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
@@ -213,6 +213,9 @@ slowpath:
	     btree_write_buffered_journal_cmp,
	     NULL);
 
+	commit_flags &= ~BCH_WATERMARK_MASK;
+	commit_flags |= BCH_WATERMARK_reclaim;
+
	for (i = keys; i < keys + nr; i++) {
		if (!i->journal_seq)
			continue;
@@ -231,8 +234,7 @@ slowpath:
		ret = commit_do(trans, NULL, NULL,
				commit_flags|
				BTREE_INSERT_NOFAIL|
-				BTREE_INSERT_JOURNAL_RECLAIM|
-				BCH_WATERMARK_reclaim,
+				BTREE_INSERT_JOURNAL_RECLAIM,
				__bch2_btree_insert(trans, i->btree, &i->k, 0));
		if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret)))
			break;
@@ -458,8 +458,7 @@ int bch2_data_update_init(struct btree_trans *trans,
	m->op.compression_type =
		bch2_compression_opt_to_type[io_opts.background_compression ?:
					     io_opts.compression];
-	if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
-		m->op.watermark = BCH_WATERMARK_copygc;
+	m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
 
	bkey_for_each_ptr(ptrs, ptr)
		percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);
@@ -141,7 +141,7 @@ retry:
			break;
		}
 
-		ret = bch2_btree_node_update_key(&trans, &iter, b, k.k, false);
+		ret = bch2_btree_node_update_key(&trans, &iter, b, k.k, 0, false);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
			ret = 0;
			continue;
@@ -202,7 +202,7 @@ static int bch2_copygc(struct btree_trans *trans,
 {
	struct bch_fs *c = trans->c;
	struct data_update_opts data_opts = {
-		.btree_insert_flags = BTREE_INSERT_USE_RESERVE|BCH_WATERMARK_copygc,
+		.btree_insert_flags = BCH_WATERMARK_copygc,
	};
	move_buckets buckets = { 0 };
	struct move_bucket_in_flight *f;