
btrfs: preallocate ulist memory for qgroup rsv

When qgroups are enabled, during data reservation we unconditionally
allocate the ulist_nodes that track the exact reserved extents with
GFP_ATOMIC. This is unnecessary: we can follow the model already
employed by the struct extent_state we preallocate in the non-qgroups
case, which should reduce the risk of GFP_ATOMIC allocation failures.

Add a prealloc node to struct ulist, which ulist_add will grab when
present, and try to allocate it before taking the tree lock, while we
can still take advantage of a less strict gfp mask. The lifetime of
that node belongs to the new prealloc field until it is used, at which
point it belongs to the ulist linked list.

Reviewed-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Boris Burkov <boris@bur.io>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 33336c1805 (parent 28cb13f29f)
Author: Boris Burkov <boris@bur.io>, 2024-06-20 10:33:10 -07:00; committed by David Sterba
4 changed files with 29 additions and 3 deletions
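
For orientation before the per-file hunks, here is the pattern in
isolation. This is a hypothetical sketch: only ulist_prealloc() and
ulist_add() come from this patch; the wrapper function, its name, and
the lock passed in are illustrative.

/*
 * Illustrative only: preallocate while a sleeping gfp mask is still
 * usable, then add under the spinlock where only GFP_ATOMIC is safe.
 */
static int track_range(struct ulist *ulist, spinlock_t *lock,
		       u64 start, u64 len)
{
	int ret;

	/* May sleep; failure is harmless, ulist_add() falls back. */
	ulist_prealloc(ulist, GFP_NOFS);

	spin_lock(lock);
	/* Consumes the preallocated node if one is present. */
	ret = ulist_add(ulist, start, len, GFP_ATOMIC);
	spin_unlock(lock);

	return ret;
}

The extent-io-tree.c hunk below follows exactly this order.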

fs/btrfs/extent-io-tree.c

@@ -4,6 +4,7 @@
 #include <trace/events/btrfs.h>
 #include "messages.h"
 #include "ctree.h"
+#include "extent_io.h"
 #include "extent-io-tree.h"
 #include "btrfs_inode.h"
@@ -1084,6 +1085,9 @@ again:
		 */
		prealloc = alloc_extent_state(mask);
	}
+	/* Optimistically preallocate the extent changeset ulist node. */
+	if (changeset)
+		extent_changeset_prealloc(changeset, mask);
 
	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {

fs/btrfs/extent_io.h

@@ -215,6 +215,11 @@ static inline struct extent_changeset *extent_changeset_alloc(void)
	return ret;
 }
 
+static inline void extent_changeset_prealloc(struct extent_changeset *changeset, gfp_t gfp_mask)
+{
+	ulist_prealloc(&changeset->range_changed, gfp_mask);
+}
+
 static inline void extent_changeset_release(struct extent_changeset *changeset)
 {
	if (!changeset)

fs/btrfs/ulist.c

@@ -50,6 +50,7 @@ void ulist_init(struct ulist *ulist)
	INIT_LIST_HEAD(&ulist->nodes);
	ulist->root = RB_ROOT;
	ulist->nnodes = 0;
+	ulist->prealloc = NULL;
 }
 
 /*
@@ -68,6 +69,8 @@ void ulist_release(struct ulist *ulist)
	list_for_each_entry_safe(node, next, &ulist->nodes, list) {
		kfree(node);
	}
+	kfree(ulist->prealloc);
+	ulist->prealloc = NULL;
	ulist->root = RB_ROOT;
	INIT_LIST_HEAD(&ulist->nodes);
 }
@@ -105,6 +108,12 @@ struct ulist *ulist_alloc(gfp_t gfp_mask)
	return ulist;
 }
 
+void ulist_prealloc(struct ulist *ulist, gfp_t gfp_mask)
+{
+	if (!ulist->prealloc)
+		ulist->prealloc = kzalloc(sizeof(*ulist->prealloc), gfp_mask);
+}
+
 /*
  * Free dynamically allocated ulist.
  *
@@ -206,9 +215,15 @@ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
			*old_aux = node->aux;
		return 0;
	}
-	node = kmalloc(sizeof(*node), gfp_mask);
-	if (!node)
-		return -ENOMEM;
+
+	if (ulist->prealloc) {
+		node = ulist->prealloc;
+		ulist->prealloc = NULL;
+	} else {
+		node = kmalloc(sizeof(*node), gfp_mask);
+		if (!node)
+			return -ENOMEM;
+	}
 
	node->val = val;
	node->aux = aux;

fs/btrfs/ulist.h

@@ -41,12 +41,14 @@ struct ulist {
	struct list_head nodes;
	struct rb_root root;
+	struct ulist_node *prealloc;
 };
 
 void ulist_init(struct ulist *ulist);
 void ulist_release(struct ulist *ulist);
 void ulist_reinit(struct ulist *ulist);
 struct ulist *ulist_alloc(gfp_t gfp_mask);
+void ulist_prealloc(struct ulist *ulist, gfp_t mask);
 void ulist_free(struct ulist *ulist);
 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
 int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
		    u64 *old_aux, gfp_t gfp_mask);
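
Taken together, the four files wire this through one level of
indirection: extent_changeset_prealloc() forwards to ulist_prealloc()
on the changeset's range_changed ulist, and the GFP_ATOMIC ulist_add()
later performed under the tree lock consumes that node. An end-to-end
sketch, again hypothetical: extent_changeset_alloc() and
extent_changeset_free() are existing btrfs helpers, but record_range()
and its use of the tree lock are illustrative only.

static int record_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	struct extent_changeset *changeset;
	int ret;

	changeset = extent_changeset_alloc();
	if (!changeset)
		return -ENOMEM;

	/* Sleeping allocation, done before the lock is taken. */
	extent_changeset_prealloc(changeset, GFP_NOFS);

	spin_lock(&tree->lock);
	/* Atomic context: the preallocated ulist node is consumed here. */
	ret = ulist_add(&changeset->range_changed, start, end, GFP_ATOMIC);
	spin_unlock(&tree->lock);

	extent_changeset_free(changeset);
	return ret;
}

Note that ulist_prealloc() deliberately ignores allocation failure: if
no node was set aside, ulist_add() simply falls back to the old
GFP_ATOMIC kmalloc(), so the change can only remove failure cases,
never add them.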