bcachefs: x-macroize alloc_reserve enum
This makes an array of strings available, like our other enums.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
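For readers unfamiliar with the pattern: an "x-macro" defines a list of values once and expands it multiple times with different definitions of x(), so an enum and its string table can never drift out of sync. A minimal standalone sketch of the technique (the names here are illustrative, not the bcachefs identifiers):

#include <stdio.h>

/* The authoritative list; each entry is emitted through x(). */
#define COLOR_LIST()	\
	x(red)		\
	x(green)	\
	x(blue)

/* Expansion 1: enum constants, numbered from 0 in list order. */
enum color {
#define x(name)	COLOR_##name,
	COLOR_LIST()
#undef x
	COLOR_NR
};

/* Expansion 2: a parallel string table, indexed by the enum. */
const char * const color_names[] = {
#define x(name)	#name,
	COLOR_LIST()
#undef x
	NULL
};

int main(void)
{
	printf("%s\n", color_names[COLOR_green]);	/* prints "green" */
	return 0;
}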
parent f13fd87a39
commit 3e1547116f
@@ -780,7 +780,7 @@ static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
 	 * Don't strand buckets on the copygc freelist until
 	 * after recovery is finished:
 	 */
-	if (i == RESERVE_MOVINGGC &&
+	if (i == RESERVE_movinggc &&
 	    !test_bit(BCH_FS_STARTED, &c->flags))
 		continue;
 
@@ -941,7 +941,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
 		 * allocations for foreground writes must wait -
 		 * not -ENOSPC calculations.
 		 */
-		for (j = 0; j < RESERVE_NONE; j++)
+		for (j = 0; j < RESERVE_none; j++)
 			dev_reserve += ca->free[j].size;
 
 		dev_reserve += 1;	/* btree write point */
@@ -27,6 +27,13 @@
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 
+const char * const bch2_alloc_reserves[] = {
+#define x(t) #t,
+	BCH_ALLOC_RESERVES()
+#undef x
+	NULL
+};
+
 /*
  * Open buckets represent a bucket that's currently being allocated from. They
  * serve two purposes:
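Because the enum constants now count up from 0 (see the enum alloc_reserve hunk below), bch2_alloc_reserves[] can be indexed directly by an enum alloc_reserve value, which is what the updated tracepoint call sites rely on. An illustrative use:

	/* evaluates to the string "movinggc" */
	const char *name = bch2_alloc_reserves[RESERVE_movinggc];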
@@ -168,10 +175,10 @@ long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
 {
 	switch (reserve) {
-	case RESERVE_BTREE:
-	case RESERVE_BTREE_MOVINGGC:
+	case RESERVE_btree:
+	case RESERVE_btree_movinggc:
 		return 0;
-	case RESERVE_MOVINGGC:
+	case RESERVE_movinggc:
 		return OPEN_BUCKETS_COUNT / 4;
 	default:
 		return OPEN_BUCKETS_COUNT / 2;
@@ -219,17 +226,17 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
 			c->blocked_allocate_open_bucket = local_clock();
 
 		spin_unlock(&c->freelist_lock);
-		trace_open_bucket_alloc_fail(ca, reserve);
+		trace_open_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve]);
 		return ERR_PTR(-OPEN_BUCKETS_EMPTY);
 	}
 
-	if (likely(fifo_pop(&ca->free[RESERVE_NONE], b)))
+	if (likely(fifo_pop(&ca->free[RESERVE_none], b)))
 		goto out;
 
 	switch (reserve) {
-	case RESERVE_BTREE_MOVINGGC:
-	case RESERVE_MOVINGGC:
-		if (fifo_pop(&ca->free[RESERVE_MOVINGGC], b))
+	case RESERVE_btree_movinggc:
+	case RESERVE_movinggc:
+		if (fifo_pop(&ca->free[RESERVE_movinggc], b))
 			goto out;
 		break;
 	default:
@@ -244,7 +251,7 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
 
 	spin_unlock(&c->freelist_lock);
 
-	trace_bucket_alloc_fail(ca, reserve);
+	trace_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve]);
 	return ERR_PTR(-FREELIST_EMPTY);
 out:
 	verify_not_on_freelist(c, ca, b);
@@ -282,7 +289,7 @@ out:
 
 	bch2_wake_allocator(ca);
 
-	trace_bucket_alloc(ca, reserve);
+	trace_bucket_alloc(ca, bch2_alloc_reserves[reserve]);
 	return ob;
 }
 
@@ -12,6 +12,8 @@ struct bch_dev;
 struct bch_fs;
 struct bch_devs_list;
 
+extern const char * const bch2_alloc_reserves[];
+
 struct dev_alloc_list {
 	unsigned	nr;
 	u8		devs[BCH_SB_MEMBERS_MAX];
@@ -22,12 +22,17 @@ enum allocator_states {
 #undef x
 };
 
+#define BCH_ALLOC_RESERVES()		\
+	x(btree_movinggc)		\
+	x(btree)			\
+	x(movinggc)			\
+	x(none)
+
 enum alloc_reserve {
-	RESERVE_BTREE_MOVINGGC	= -2,
-	RESERVE_BTREE		= -1,
-	RESERVE_MOVINGGC	= 0,
-	RESERVE_NONE		= 1,
-	RESERVE_NR		= 2,
+#define x(name)	RESERVE_##name,
+	BCH_ALLOC_RESERVES()
+#undef x
+	RESERVE_NR
 };
 
 typedef FIFO(long)	alloc_fifo;
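For reference, the two x-macro expansions above are equivalent to writing out:

enum alloc_reserve {
	RESERVE_btree_movinggc,	/* 0 */
	RESERVE_btree,		/* 1 */
	RESERVE_movinggc,	/* 2 */
	RESERVE_none,		/* 3 */
	RESERVE_NR		/* 4 */
};

const char * const bch2_alloc_reserves[] = {
	"btree_movinggc", "btree", "movinggc", "none", NULL,
};

Note that the constants are now numbered consecutively from 0 (the old hand-assigned values ran from -2 to 2), so any of them can safely index the string table.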
@@ -194,10 +194,10 @@ static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
 
 	if (flags & BTREE_INSERT_USE_RESERVE) {
 		nr_reserve	= 0;
-		alloc_reserve	= RESERVE_BTREE_MOVINGGC;
+		alloc_reserve	= RESERVE_btree_movinggc;
 	} else {
 		nr_reserve	= BTREE_NODE_RESERVE;
-		alloc_reserve	= RESERVE_BTREE;
+		alloc_reserve	= RESERVE_btree;
 	}
 
 	mutex_lock(&c->btree_reserve_cache_lock);
@@ -2091,9 +2091,9 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	    !(buckets_nouse	= kvpmalloc(BITS_TO_LONGS(nbuckets) *
 					    sizeof(unsigned long),
 					    GFP_KERNEL|__GFP_ZERO))) ||
-	    !init_fifo(&free[RESERVE_MOVINGGC],
+	    !init_fifo(&free[RESERVE_movinggc],
 		       copygc_reserve, GFP_KERNEL) ||
-	    !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
+	    !init_fifo(&free[RESERVE_none], reserve_none, GFP_KERNEL) ||
 	    !init_fifo(&free_inc,	free_inc_nr, GFP_KERNEL) ||
 	    !init_heap(&alloc_heap,	ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
 		goto err;
@@ -1307,8 +1307,8 @@ static int new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h,
 					    &nr_have_parity,
 					    &have_cache,
 					    h->copygc
-					    ? RESERVE_MOVINGGC
-					    : RESERVE_NONE,
+					    ? RESERVE_movinggc
+					    : RESERVE_none,
 					    0,
 					    cl);
 
@@ -1336,8 +1336,8 @@ static int new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h,
 					    &nr_have_data,
 					    &have_cache,
 					    h->copygc
-					    ? RESERVE_MOVINGGC
-					    : RESERVE_NONE,
+					    ? RESERVE_movinggc
+					    : RESERVE_none,
 					    0,
 					    cl);
 
@@ -70,7 +70,7 @@ static inline u64 *op_journal_seq(struct bch_write_op *op)
 
 static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
 {
-	return op->alloc_reserve == RESERVE_MOVINGGC
+	return op->alloc_reserve == RESERVE_movinggc
 		? op->c->copygc_wq
 		: op->c->btree_update_wq;
 }
@@ -97,7 +97,7 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
 	op->compression_type	= bch2_compression_opt_to_type[opts.compression];
 	op->nr_replicas		= 0;
 	op->nr_replicas_required = c->opts.data_replicas_required;
-	op->alloc_reserve	= RESERVE_NONE;
+	op->alloc_reserve	= RESERVE_none;
 	op->incompressible	= 0;
 	op->open_buckets.nr	= 0;
 	op->devs_have.nr	= 0;
@@ -817,7 +817,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
 		}
 	} else {
 		rcu_read_lock();
-		ob = bch2_bucket_alloc(c, ca, RESERVE_NONE,
+		ob = bch2_bucket_alloc(c, ca, RESERVE_none,
 				       false, cl);
 		rcu_read_unlock();
 		if (IS_ERR(ob)) {
@@ -351,7 +351,7 @@ int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
 	}
 
 	if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE) {
-		m->op.alloc_reserve = RESERVE_MOVINGGC;
+		m->op.alloc_reserve = RESERVE_movinggc;
 		m->op.flags |= BCH_WRITE_ALLOC_NOWAIT;
 	} else {
 		/* XXX: this should probably be passed in */
@@ -30,21 +30,6 @@
 #include <linux/sort.h>
 #include <linux/wait.h>
 
-/*
- * We can't use the entire copygc reserve in one iteration of copygc: we may
- * need the buckets we're freeing up to go back into the copygc reserve to make
- * forward progress, but if the copygc reserve is full they'll be available for
- * any allocation - and it's possible that in a given iteration, we free up most
- * of the buckets we're going to free before we allocate most of the buckets
- * we're going to allocate.
- *
- * If we only use half of the reserve per iteration, then in steady state we'll
- * always have room in the reserve for the buckets we're going to need in the
- * next iteration:
- */
-#define COPYGC_BUCKETS_PER_ITER(ca)					\
-	((ca)->free[RESERVE_MOVINGGC].size / 2)
-
 static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
 {
 	const struct copygc_heap_entry *l = _l;
@@ -124,7 +109,7 @@ static bool have_copygc_reserve(struct bch_dev *ca)
 	bool ret;
 
 	spin_lock(&ca->fs->freelist_lock);
-	ret = fifo_full(&ca->free[RESERVE_MOVINGGC]) ||
+	ret = fifo_full(&ca->free[RESERVE_movinggc]) ||
 		ca->allocator_state != ALLOCATOR_running;
 	spin_unlock(&ca->fs->freelist_lock);
 
@@ -265,7 +250,7 @@ static int bch2_copygc(struct bch_fs *c)
 		closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));
 
 		spin_lock(&ca->fs->freelist_lock);
-		sectors_reserved += fifo_used(&ca->free[RESERVE_MOVINGGC]) * ca->mi.bucket_size;
+		sectors_reserved += fifo_used(&ca->free[RESERVE_movinggc]) * ca->mi.bucket_size;
 		spin_unlock(&ca->fs->freelist_lock);
 	}
@@ -281,7 +266,7 @@ static int bch2_copygc(struct bch_fs *c)
 	}
 
 	/*
-	 * Our btree node allocations also come out of RESERVE_MOVINGGC:
+	 * Our btree node allocations also come out of RESERVE_movingc:
 	 */
 	sectors_reserved = (sectors_reserved * 3) / 4;
 	if (!sectors_reserved) {
@@ -758,8 +758,8 @@ static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
 	       stats.buckets_ec,
 	       __dev_buckets_available(ca, stats),
 	       fifo_used(&ca->free_inc),		ca->free_inc.size,
-	       fifo_used(&ca->free[RESERVE_MOVINGGC]),	ca->free[RESERVE_MOVINGGC].size,
-	       fifo_used(&ca->free[RESERVE_NONE]),	ca->free[RESERVE_NONE].size,
+	       fifo_used(&ca->free[RESERVE_movinggc]),	ca->free[RESERVE_movinggc].size,
+	       fifo_used(&ca->free[RESERVE_none]),	ca->free[RESERVE_none].size,
 	       c->freelist_wait.list.first		? "waiting" : "empty",
 	       OPEN_BUCKETS_COUNT - c->open_buckets_nr_free,
 	       ca->nr_open_buckets,
@@ -468,37 +468,37 @@ TRACE_EVENT(invalidate,
 );
 
 DECLARE_EVENT_CLASS(bucket_alloc,
-	TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-	TP_ARGS(ca, reserve),
+	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
+	TP_ARGS(ca, alloc_reserve),
 
 	TP_STRUCT__entry(
 		__field(dev_t,			dev	)
-		__field(enum alloc_reserve,	reserve	)
+		__array(char,	reserve,	16	)
 	),
 
 	TP_fast_assign(
 		__entry->dev		= ca->dev;
-		__entry->reserve	= reserve;
+		strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
 	),
 
-	TP_printk("%d,%d reserve %d",
+	TP_printk("%d,%d reserve %s",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  __entry->reserve)
 );
 
 DEFINE_EVENT(bucket_alloc, bucket_alloc,
-	TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-	TP_ARGS(ca, reserve)
+	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
+	TP_ARGS(ca, alloc_reserve)
 );
 
 DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
-	TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-	TP_ARGS(ca, reserve)
+	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
+	TP_ARGS(ca, alloc_reserve)
 );
 
 DEFINE_EVENT(bucket_alloc, open_bucket_alloc_fail,
-	TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-	TP_ARGS(ca, reserve)
+	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
+	TP_ARGS(ca, alloc_reserve)
 );
 
 /* Moving IO */
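The event class now snapshots the reserve name into a fixed 16-byte buffer at trace time rather than recording the raw enum value, so a rendered event changes from something like "8,16 reserve 0" to "8,16 reserve movinggc" (device numbers illustrative). Every name produced by BCH_ALLOC_RESERVES() fits in the array, including the longest, "btree_movinggc" (14 characters plus NUL).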