bcachefs: allocate_dropping_locks()
Add two new helpers for allocating memory with btree locks held: the idea is to first try the allocation with GFP_NOWAIT|__GFP_NOWARN; if that fails, unlock, retry with GFP_KERNEL, and then call bch2_trans_relock(). Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
parent
3ebfc8fe95
commit
d95dd378c2
@ -859,6 +859,32 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
|
|||||||
bch2_trans_unlock(_trans); \
|
bch2_trans_unlock(_trans); \
|
||||||
_do ?: bch2_trans_relock(_trans); \
|
_do ?: bch2_trans_relock(_trans); \
|
||||||
})
|
})
|
||||||
|
|
||||||
|
/*
 * allocate_dropping_locks_errcode() - run an allocation expression @_do
 * (which must use the local @_gfp variable) while holding btree locks.
 *
 * First attempt uses GFP_NOWAIT|__GFP_NOWARN so we never sleep with locks
 * held; if that fails with -ENOMEM, drop the transaction's locks, retry
 * with GFP_KERNEL, then relock (via drop_locks_do()).
 *
 * Evaluates to the int errcode of the allocation (0 on success, or a
 * relock/transaction-restart error from drop_locks_do()).
 */
#define allocate_dropping_locks_errcode(_trans, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	int _ret = _do;							\
									\
	if (bch2_err_matches(_ret, ENOMEM)) {				\
		_gfp = GFP_KERNEL;					\
		/* use the macro parameter, not a captured 'trans' */	\
		_ret = drop_locks_do(_trans, _do);			\
	}								\
	_ret;								\
})
|
||||||
|
|
||||||
|
/*
 * allocate_dropping_locks() - run a pointer-returning allocation
 * expression @_do (which must use the local @_gfp variable) while holding
 * btree locks.
 *
 * First attempt uses GFP_NOWAIT|__GFP_NOWARN; on failure (NULL), drop the
 * transaction's locks, retry with GFP_KERNEL, then relock.
 *
 * Evaluates to the allocated pointer (possibly NULL); @_ret receives 0 or
 * the relock/transaction-restart error from drop_locks_do().
 */
#define allocate_dropping_locks(_trans, _ret, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	typeof(_do) _p = _do;						\
									\
	_ret = 0;							\
	if (unlikely(!_p)) {						\
		_gfp = GFP_KERNEL;					\
		/* use the macro parameter, not a captured 'trans' */	\
		_ret = drop_locks_do(_trans, ((_p = _do), 0));		\
	}								\
	_p;								\
})
|
||||||
|
|
||||||
/* new multiple iterator interface: */
|
/* new multiple iterator interface: */
|
||||||
|
|
||||||
void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
|
void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
|
||||||
|
@ -264,15 +264,8 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
|
|||||||
return ck;
|
return ck;
|
||||||
}
|
}
|
||||||
|
|
||||||
ck = kmem_cache_zalloc(bch2_key_cache, GFP_NOWAIT|__GFP_NOWARN);
|
ck = allocate_dropping_locks(trans, ret,
|
||||||
if (likely(ck))
|
kmem_cache_zalloc(bch2_key_cache, _gfp));
|
||||||
goto init;
|
|
||||||
|
|
||||||
bch2_trans_unlock(trans);
|
|
||||||
|
|
||||||
ck = kmem_cache_zalloc(bch2_key_cache, GFP_KERNEL);
|
|
||||||
|
|
||||||
ret = bch2_trans_relock(trans);
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
kmem_cache_free(bch2_key_cache, ck);
|
kmem_cache_free(bch2_key_cache, ck);
|
||||||
return ERR_PTR(ret);
|
return ERR_PTR(ret);
|
||||||
@ -280,7 +273,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
|
|||||||
|
|
||||||
if (!ck)
|
if (!ck)
|
||||||
return NULL;
|
return NULL;
|
||||||
init:
|
|
||||||
INIT_LIST_HEAD(&ck->list);
|
INIT_LIST_HEAD(&ck->list);
|
||||||
bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
|
bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
|
||||||
|
|
||||||
|
@ -578,15 +578,8 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
|
|||||||
static int ec_stripe_mem_alloc(struct btree_trans *trans,
|
static int ec_stripe_mem_alloc(struct btree_trans *trans,
|
||||||
struct btree_iter *iter)
|
struct btree_iter *iter)
|
||||||
{
|
{
|
||||||
size_t idx = iter->pos.offset;
|
return allocate_dropping_locks_errcode(trans,
|
||||||
|
__ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp));
|
||||||
if (!__ec_stripe_mem_alloc(trans->c, idx, GFP_NOWAIT|__GFP_NOWARN))
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
bch2_trans_unlock(trans);
|
|
||||||
|
|
||||||
return __ec_stripe_mem_alloc(trans->c, idx, GFP_KERNEL) ?:
|
|
||||||
bch2_trans_relock(trans);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
Loading…
Reference in New Issue
Block a user