bcachefs: Factor out btree_key_can_insert()
We are working on getting rid of all the reasons bch2_insert_fixup_extent() can fail or stop partway through, which is needed for other refactorings. One of the reasons we could have to bail out is that splitting a compressed extent might require adding to our disk reservation — but we can check for that before actually starting the insert. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
parent
a50ed7c8e8
commit
b0004d8dfa
@ -448,7 +448,6 @@ enum btree_insert_ret {
|
||||
/* extent spanned multiple leaf nodes: have to traverse to next node: */
|
||||
BTREE_INSERT_NEED_TRAVERSE,
|
||||
/* write lock held for too long */
|
||||
BTREE_INSERT_NEED_RESCHED,
|
||||
/* leaf node needs to be split */
|
||||
BTREE_INSERT_BTREE_NODE_FULL,
|
||||
BTREE_INSERT_JOURNAL_RES_FULL,
|
||||
|
@ -297,6 +297,30 @@ static inline int btree_trans_cmp(struct btree_insert_entry l,
|
||||
|
||||
/* Normal update interface: */
|
||||
|
||||
static enum btree_insert_ret
|
||||
btree_key_can_insert(struct btree_insert *trans,
|
||||
struct btree_insert_entry *insert,
|
||||
unsigned *u64s)
|
||||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
struct btree *b = insert->iter->l[0].b;
|
||||
static enum btree_insert_ret ret;
|
||||
|
||||
if (unlikely(btree_node_fake(b)))
|
||||
return BTREE_INSERT_BTREE_NODE_FULL;
|
||||
|
||||
ret = !btree_node_is_extents(b)
|
||||
? BTREE_INSERT_OK
|
||||
: bch2_extent_can_insert(trans, insert, u64s);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (*u64s > bch_btree_keys_u64s_remaining(c, b))
|
||||
return BTREE_INSERT_BTREE_NODE_FULL;
|
||||
|
||||
return BTREE_INSERT_OK;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get journal reservation, take write locks, and attempt to do btree update(s):
|
||||
*/
|
||||
@ -336,24 +360,34 @@ static inline int do_btree_insert_at(struct btree_insert *trans,
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if the insert will fit in the leaf node with the write lock
|
||||
* held, otherwise another thread could write the node changing the
|
||||
* amount of space available:
|
||||
*/
|
||||
u64s = 0;
|
||||
trans_for_each_entry(trans, i) {
|
||||
/* Multiple inserts might go to same leaf: */
|
||||
if (!same_leaf_as_prev(trans, i))
|
||||
u64s = 0;
|
||||
|
||||
/*
|
||||
* bch2_btree_node_insert_fits() must be called under write lock:
|
||||
* with only an intent lock, another thread can still call
|
||||
* bch2_btree_node_write(), converting an unwritten bset to a
|
||||
* written one
|
||||
*/
|
||||
u64s += i->k->k.u64s + i->extra_res;
|
||||
if (!bch2_btree_node_insert_fits(c,
|
||||
i->iter->l[0].b, u64s)) {
|
||||
switch (btree_key_can_insert(trans, i, &u64s)) {
|
||||
case BTREE_INSERT_OK:
|
||||
break;
|
||||
case BTREE_INSERT_BTREE_NODE_FULL:
|
||||
ret = -EINTR;
|
||||
*split = i->iter;
|
||||
goto out;
|
||||
case BTREE_INSERT_ENOSPC:
|
||||
ret = -ENOSPC;
|
||||
goto out;
|
||||
case BTREE_INSERT_NEED_GC_LOCK:
|
||||
ret = -EINTR;
|
||||
*cycle_gc_lock = true;
|
||||
goto out;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
@ -373,7 +407,6 @@ static inline int do_btree_insert_at(struct btree_insert *trans,
|
||||
break;
|
||||
case BTREE_INSERT_JOURNAL_RES_FULL:
|
||||
case BTREE_INSERT_NEED_TRAVERSE:
|
||||
case BTREE_INSERT_NEED_RESCHED:
|
||||
ret = -EINTR;
|
||||
break;
|
||||
case BTREE_INSERT_BTREE_NODE_FULL:
|
||||
@ -383,10 +416,6 @@ static inline int do_btree_insert_at(struct btree_insert *trans,
|
||||
case BTREE_INSERT_ENOSPC:
|
||||
ret = -ENOSPC;
|
||||
break;
|
||||
case BTREE_INSERT_NEED_GC_LOCK:
|
||||
ret = -EINTR;
|
||||
*cycle_gc_lock = true;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
@ -1113,8 +1113,6 @@ static bool bch2_extent_merge_inline(struct bch_fs *,
|
||||
struct bkey_packed *,
|
||||
bool);
|
||||
|
||||
#define MAX_LOCK_HOLD_TIME (5 * NSEC_PER_MSEC)
|
||||
|
||||
static enum btree_insert_ret
|
||||
extent_insert_should_stop(struct extent_insert_state *s)
|
||||
{
|
||||
@ -1287,23 +1285,41 @@ extent_insert_advance_pos(struct extent_insert_state *s, struct bkey_s_c k)
|
||||
return __extent_insert_advance_pos(s, next_pos, k);
|
||||
}
|
||||
|
||||
static enum btree_insert_ret
|
||||
extent_insert_check_split_compressed(struct extent_insert_state *s,
|
||||
struct bkey_s_c k,
|
||||
enum bch_extent_overlap overlap)
|
||||
enum btree_insert_ret
|
||||
bch2_extent_can_insert(struct btree_insert *trans,
|
||||
struct btree_insert_entry *insert,
|
||||
unsigned *u64s)
|
||||
{
|
||||
struct bch_fs *c = s->trans->c;
|
||||
unsigned sectors;
|
||||
struct btree_iter_level *l = &insert->iter->l[0];
|
||||
struct btree_node_iter node_iter = l->iter;
|
||||
enum bch_extent_overlap overlap;
|
||||
struct bkey_packed *_k;
|
||||
struct bkey unpacked;
|
||||
struct bkey_s_c k;
|
||||
int sectors;
|
||||
|
||||
_k = bch2_btree_node_iter_peek_filter(&node_iter, l->b,
|
||||
KEY_TYPE_DISCARD);
|
||||
if (!_k)
|
||||
return BTREE_INSERT_OK;
|
||||
|
||||
k = bkey_disassemble(l->b, _k, &unpacked);
|
||||
|
||||
overlap = bch2_extent_overlap(&insert->k->k, k.k);
|
||||
|
||||
/* account for having to split existing extent: */
|
||||
if (overlap == BCH_EXTENT_OVERLAP_MIDDLE)
|
||||
*u64s += _k->u64s;
|
||||
|
||||
if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
|
||||
(sectors = bch2_extent_is_compressed(k))) {
|
||||
int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD;
|
||||
|
||||
if (s->trans->flags & BTREE_INSERT_NOFAIL)
|
||||
if (trans->flags & BTREE_INSERT_NOFAIL)
|
||||
flags |= BCH_DISK_RESERVATION_NOFAIL;
|
||||
|
||||
switch (bch2_disk_reservation_add(c,
|
||||
s->trans->disk_res,
|
||||
switch (bch2_disk_reservation_add(trans->c,
|
||||
trans->disk_res,
|
||||
sectors * bch2_extent_nr_dirty_ptrs(k),
|
||||
flags)) {
|
||||
case 0:
|
||||
@ -1471,10 +1487,6 @@ __bch2_delete_fixup_extent(struct extent_insert_state *s)
|
||||
|
||||
overlap = bch2_extent_overlap(&insert->k, k.k);
|
||||
|
||||
ret = extent_insert_check_split_compressed(s, k.s_c, overlap);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
ret = extent_insert_advance_pos(s, k.s_c);
|
||||
if (ret)
|
||||
break;
|
||||
@ -1550,10 +1562,6 @@ __bch2_insert_fixup_extent(struct extent_insert_state *s)
|
||||
|
||||
overlap = bch2_extent_overlap(&insert->k, k.k);
|
||||
|
||||
ret = extent_insert_check_split_compressed(s, k.s_c, overlap);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
if (!k.k->size)
|
||||
goto squash;
|
||||
|
||||
|
@ -63,8 +63,10 @@ int bch2_extent_pick_ptr(struct bch_fs *, struct bkey_s_c,
|
||||
struct extent_pick_ptr *);
|
||||
|
||||
enum btree_insert_ret
|
||||
bch2_insert_fixup_extent(struct btree_insert *,
|
||||
struct btree_insert_entry *);
|
||||
bch2_extent_can_insert(struct btree_insert *, struct btree_insert_entry *,
|
||||
unsigned *);
|
||||
enum btree_insert_ret
|
||||
bch2_insert_fixup_extent(struct btree_insert *, struct btree_insert_entry *);
|
||||
|
||||
bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
|
||||
void bch2_extent_mark_replicas_cached(struct bch_fs *, struct bkey_s_extent,
|
||||
|
Loading…
x
Reference in New Issue
Block a user