bcachefs: Better inlining in core write path
Provide inline versions of some allocation functions
 - bch2_alloc_sectors_done_inlined()
 - bch2_alloc_sectors_append_ptrs_inlined()
and use them in the core IO path.

Also, inline bch2_extent_update_i_size_sectors() and bch2_bkey_append_ptr().

In the core write path, function call overhead matters - every function call is a jump to a new location and a potential cache miss.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 19a614d2e4
commit 393a1f6863
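
The diff below moves the bodies of bch2_alloc_sectors_done() and bch2_alloc_sectors_append_ptrs() into static inline functions in the header and keeps thin out-of-line wrappers behind, so the hot write path can call the inlined versions directly. A minimal, self-contained sketch of that pattern follows; it is not bcachefs code, and every name in it (wp_stats, stats_add_inlined, stats_add) is hypothetical.

/*
 * Sketch of the "inline version + out-of-line wrapper" pattern this commit
 * applies. Not bcachefs code; all names are hypothetical.
 */
#include <stdio.h>

struct wp_stats {
	unsigned long sectors_allocated;
};

/* Header-style inline version: the body is visible at every call site. */
static inline void stats_add_inlined(struct wp_stats *s, unsigned long sectors)
{
	s->sectors_allocated += sectors;
}

/* Out-of-line wrapper kept for callers outside the hot path. */
static void stats_add(struct wp_stats *s, unsigned long sectors)
{
	stats_add_inlined(s, sectors);
}

int main(void)
{
	struct wp_stats s = { 0 };

	stats_add_inlined(&s, 8);	/* hot path: no extra function call */
	stats_add(&s, 8);		/* cold path: ordinary call through the wrapper */
	printf("%lu\n", s.sectors_allocated);
	return 0;
}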
@@ -1244,34 +1244,11 @@ struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
 	};
 }
 
-/*
- * Append pointers to the space we just allocated to @k, and mark @sectors space
- * as allocated out of @ob
- */
 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
 				    struct bkey_i *k, unsigned sectors,
 				    bool cached)
 {
-	struct open_bucket *ob;
-	unsigned i;
-
-	BUG_ON(sectors > wp->sectors_free);
-	wp->sectors_free -= sectors;
-	wp->sectors_allocated += sectors;
-
-	open_bucket_for_each(c, &wp->ptrs, ob, i) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
-		struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
-
-		ptr.cached = cached ||
-			(!ca->mi.durability &&
-			 wp->data_type == BCH_DATA_user);
-
-		bch2_bkey_append_ptr(k, ptr);
-
-		BUG_ON(sectors > ob->sectors_free);
-		ob->sectors_free -= sectors;
-	}
+	bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
 }
 
 /*
@@ -1280,17 +1257,7 @@ void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
  */
 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
 {
-	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
-	struct open_bucket *ob;
-	unsigned i;
-
-	open_bucket_for_each(c, &wp->ptrs, ob, i)
-		ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
-	wp->ptrs = keep;
-
-	mutex_unlock(&wp->lock);
-
-	bch2_open_buckets_put(c, &ptrs);
+	bch2_alloc_sectors_done_inlined(c, wp);
 }
 
 static inline void writepoint_init(struct write_point *wp,
@@ -4,6 +4,8 @@
 
 #include "bcachefs.h"
 #include "alloc_types.h"
+#include "extents.h"
+#include "super.h"
 
 #include <linux/hash.h>
 
@@ -81,6 +83,21 @@ static inline void bch2_open_buckets_put(struct bch_fs *c,
 	ptrs->nr = 0;
 }
 
+static inline void bch2_alloc_sectors_done_inlined(struct bch_fs *c, struct write_point *wp)
+{
+	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
+	struct open_bucket *ob;
+	unsigned i;
+
+	open_bucket_for_each(c, &wp->ptrs, ob, i)
+		ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
+	wp->ptrs = keep;
+
+	mutex_unlock(&wp->lock);
+
+	bch2_open_buckets_put(c, &ptrs);
+}
+
 static inline void bch2_open_bucket_get(struct bch_fs *c,
 					struct write_point *wp,
 					struct open_buckets *ptrs)
@@ -149,6 +166,38 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *,
 			     struct write_point **);
 
 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *, struct open_bucket *);
+
+/*
+ * Append pointers to the space we just allocated to @k, and mark @sectors space
+ * as allocated out of @ob
+ */
+static inline void
+bch2_alloc_sectors_append_ptrs_inlined(struct bch_fs *c, struct write_point *wp,
+				       struct bkey_i *k, unsigned sectors,
+				       bool cached)
+{
+	struct open_bucket *ob;
+	unsigned i;
+
+	BUG_ON(sectors > wp->sectors_free);
+	wp->sectors_free -= sectors;
+	wp->sectors_allocated += sectors;
+
+	open_bucket_for_each(c, &wp->ptrs, ob, i) {
+		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+		struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
+
+		ptr.cached = cached ||
+			(!ca->mi.durability &&
+			 wp->data_type == BCH_DATA_user);
+
+		bch2_bkey_append_ptr(k, ptr);
+
+		BUG_ON(sectors > ob->sectors_free);
+		ob->sectors_free -= sectors;
+	}
+}
+
 void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct write_point *,
 				    struct bkey_i *, unsigned, bool);
 void bch2_alloc_sectors_done(struct bch_fs *, struct write_point *);
@@ -698,29 +698,6 @@ void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
 	k->k.u64s -= extent_entry_u64s(entry);
 }
 
-void bch2_bkey_append_ptr(struct bkey_i *k,
-			  struct bch_extent_ptr ptr)
-{
-	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));
-
-	switch (k->k.type) {
-	case KEY_TYPE_btree_ptr:
-	case KEY_TYPE_btree_ptr_v2:
-	case KEY_TYPE_extent:
-		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);
-
-		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
-
-		memcpy((void *) &k->v + bkey_val_bytes(&k->k),
-		       &ptr,
-		       sizeof(ptr));
-		k->u64s++;
-		break;
-	default:
-		BUG();
-	}
-}
-
 static inline void __extent_entry_insert(struct bkey_i *k,
 					 union bch_extent_entry *dst,
 					 union bch_extent_entry *new)
@@ -581,8 +581,35 @@ unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);
 unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
 unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
 
+void bch2_bkey_drop_device(struct bkey_s, unsigned);
+void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);
+const struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s_c, unsigned);
+bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);
+
 void bch2_bkey_extent_entry_drop(struct bkey_i *, union bch_extent_entry *);
-void bch2_bkey_append_ptr(struct bkey_i *, struct bch_extent_ptr);
+
+static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr ptr)
+{
+	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));
+
+	switch (k->k.type) {
+	case KEY_TYPE_btree_ptr:
+	case KEY_TYPE_btree_ptr_v2:
+	case KEY_TYPE_extent:
+		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);
+
+		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
+
+		memcpy((void *) &k->v + bkey_val_bytes(&k->k),
+		       &ptr,
+		       sizeof(ptr));
+		k->u64s++;
+		break;
+	default:
+		BUG();
+	}
+}
+
 void bch2_extent_ptr_decoded_append(struct bkey_i *,
 				    struct extent_ptr_decoded *);
 union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s,
@@ -605,11 +632,6 @@ do { \
 	} \
 } while (0)
 
-void bch2_bkey_drop_device(struct bkey_s, unsigned);
-void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);
-const struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s_c, unsigned);
-bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);
-
 bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
 			   struct bch_extent_ptr, u64);
 bool bch2_extents_match(struct bkey_s_c, struct bkey_s_c);
@@ -832,7 +832,7 @@ static void init_append_extent(struct bch_write_op *op,
 			       crc.nonce)
 		bch2_extent_crc_append(&e->k_i, crc);
 
-	bch2_alloc_sectors_append_ptrs(op->c, wp, &e->k_i, crc.compressed_size,
+	bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size,
 				       op->flags & BCH_WRITE_CACHED);
 
 	bch2_keylist_push(&op->insert_keys);
@@ -1275,7 +1275,7 @@ again:
 		bch2_open_bucket_get(c, wp, &op->open_buckets);
 		ret = bch2_write_extent(op, wp, &bio);
 
-		bch2_alloc_sectors_done(c, wp);
+		bch2_alloc_sectors_done_inlined(c, wp);
 err:
 		if (ret <= 0) {
 			if (!(op->flags & BCH_WRITE_SYNC)) {