From 33336c1805d3a03240afda0bfb8c8d20395fb1d3 Mon Sep 17 00:00:00 2001
From: Boris Burkov
Date: Thu, 20 Jun 2024 10:33:10 -0700
Subject: [PATCH] btrfs: preallocate ulist memory for qgroup rsv

When qgroups are enabled, during data reservation, we allocate the
ulist_nodes that track the exact reserved extents with GFP_ATOMIC
unconditionally. This is unnecessary, and we can follow the model
already employed by the struct extent_state we preallocate in the non
qgroups case, which should reduce the risk of allocation failures with
GFP_ATOMIC.

Add a prealloc node to struct ulist which ulist_add will grab when it
is present, and try to allocate it before taking the tree lock while
we can still take advantage of a less strict gfp mask. The lifetime of
that node belongs to the new prealloc field, until it is used, at which
point it belongs to the ulist linked list.

Reviewed-by: Qu Wenruo
Reviewed-by: Filipe Manana
Signed-off-by: Boris Burkov
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/extent-io-tree.c |  4 ++++
 fs/btrfs/extent_io.h      |  5 +++++
 fs/btrfs/ulist.c          | 21 ++++++++++++++++++---
 fs/btrfs/ulist.h          |  2 ++
 4 files changed, 29 insertions(+), 3 deletions(-)

diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
index ed2cfc3d5d8a..c54c5d7a5cd5 100644
--- a/fs/btrfs/extent-io-tree.c
+++ b/fs/btrfs/extent-io-tree.c
@@ -4,6 +4,7 @@
 #include <trace/events/btrfs.h>
 #include "messages.h"
 #include "ctree.h"
+#include "extent_io.h"
 #include "extent-io-tree.h"
 #include "btrfs_inode.h"
 
@@ -1084,6 +1085,9 @@ again:
 		 */
 		prealloc = alloc_extent_state(mask);
 	}
+	/* Optimistically preallocate the extent changeset ulist node. */
+	if (changeset)
+		extent_changeset_prealloc(changeset, mask);
 
 	spin_lock(&tree->lock);
 	if (cached_state && *cached_state) {
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 96c6bbdcd5d6..8b33cfea6b75 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -215,6 +215,11 @@ static inline struct extent_changeset *extent_changeset_alloc(void)
 	return ret;
 }
 
+static inline void extent_changeset_prealloc(struct extent_changeset *changeset, gfp_t gfp_mask)
+{
+	ulist_prealloc(&changeset->range_changed, gfp_mask);
+}
+
 static inline void extent_changeset_release(struct extent_changeset *changeset)
 {
 	if (!changeset)
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index 183863f4bfa4..fc59b57257d6 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -50,6 +50,7 @@ void ulist_init(struct ulist *ulist)
 	INIT_LIST_HEAD(&ulist->nodes);
 	ulist->root = RB_ROOT;
 	ulist->nnodes = 0;
+	ulist->prealloc = NULL;
 }
 
 /*
@@ -68,6 +69,8 @@ void ulist_release(struct ulist *ulist)
 	list_for_each_entry_safe(node, next, &ulist->nodes, list) {
 		kfree(node);
 	}
+	kfree(ulist->prealloc);
+	ulist->prealloc = NULL;
 	ulist->root = RB_ROOT;
 	INIT_LIST_HEAD(&ulist->nodes);
 }
@@ -105,6 +108,12 @@ struct ulist *ulist_alloc(gfp_t gfp_mask)
 	return ulist;
 }
 
+void ulist_prealloc(struct ulist *ulist, gfp_t gfp_mask)
+{
+	if (!ulist->prealloc)
+		ulist->prealloc = kzalloc(sizeof(*ulist->prealloc), gfp_mask);
+}
+
 /*
  * Free dynamically allocated ulist.
  *
@@ -206,9 +215,15 @@ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
 			*old_aux = node->aux;
 		return 0;
 	}
-	node = kmalloc(sizeof(*node), gfp_mask);
-	if (!node)
-		return -ENOMEM;
+
+	if (ulist->prealloc) {
+		node = ulist->prealloc;
+		ulist->prealloc = NULL;
+	} else {
+		node = kmalloc(sizeof(*node), gfp_mask);
+		if (!node)
+			return -ENOMEM;
+	}
 
 	node->val = val;
 	node->aux = aux;
diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
index 8e200fe1a2dd..c62a372f1462 100644
--- a/fs/btrfs/ulist.h
+++ b/fs/btrfs/ulist.h
@@ -41,12 +41,14 @@ struct ulist {
 
 	struct list_head nodes;
 	struct rb_root root;
+	struct ulist_node *prealloc;
 };
 
 void ulist_init(struct ulist *ulist);
 void ulist_release(struct ulist *ulist);
 void ulist_reinit(struct ulist *ulist);
 struct ulist *ulist_alloc(gfp_t gfp_mask);
+void ulist_prealloc(struct ulist *ulist, gfp_t mask);
 void ulist_free(struct ulist *ulist);
 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
 int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
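For reference, here is a minimal, illustrative sketch of the caller-side pattern this change enables; it is not part of the patch. The function example_track_range, the external spinlock, and the GFP masks are made up for illustration; only ulist_prealloc(), ulist_add() and the new ulist->prealloc field from this patch are assumed:

#include <linux/spinlock.h>
#include "ulist.h"

/*
 * Sketch only: preallocate the ulist node with a relaxed GFP mask before
 * taking the lock, so that the ulist_add() performed under the lock can
 * consume the preallocated node instead of depending on a GFP_ATOMIC
 * allocation succeeding.
 */
static int example_track_range(struct ulist *ulist, spinlock_t *lock,
			       u64 val, u64 aux, gfp_t mask)
{
	int ret;

	/* Outside the lock: may sleep if @mask allows it (e.g. GFP_NOFS). */
	ulist_prealloc(ulist, mask);

	spin_lock(lock);
	/*
	 * Under the lock: ulist_add() grabs ulist->prealloc when it is set
	 * and only falls back to kmalloc(GFP_ATOMIC) when it is not.
	 */
	ret = ulist_add(ulist, val, aux, GFP_ATOMIC);
	spin_unlock(lock);

	/* ulist_add() returns 1 if added, 0 if already present, <0 on error. */
	return ret < 0 ? ret : 0;
}

If the value was already present and the preallocated node goes unused, it simply stays in ulist->prealloc until the next ulist_add() or until ulist_release() frees it, so the caller needs no extra cleanup.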