summaryrefslogtreecommitdiffstats
path: root/fs/btrfs/extent-io-tree.c
diff options
context:
space:
mode:
authorFilipe Manana <fdmanana@suse.com>2023-09-22 11:39:07 +0100
committerDavid Sterba <dsterba@suse.com>2023-10-12 16:44:15 +0200
commit63ffc1f7c492df977353a0d2adf01d41069aad68 (patch)
tree596b1938bfab9587339fa4c70b1edea59ee3ba64 /fs/btrfs/extent-io-tree.c
parentdf2a8e70c3c397d4780027be3151d3c3ae9d47a6 (diff)
downloadlinux-63ffc1f7c492df977353a0d2adf01d41069aad68.tar.gz
linux-63ffc1f7c492df977353a0d2adf01d41069aad68.tar.bz2
linux-63ffc1f7c492df977353a0d2adf01d41069aad68.zip
btrfs: make tree iteration in extent_io_tree_release() more efficient
Currently extent_io_tree_release() is a loop that keeps getting the first node in the io tree, using rb_first() which is a loop that gets to the leftmost node of the rbtree, and then for each node it calls rb_erase(), which often requires rebalancing the rbtree. We can make this more efficient by using rbtree_postorder_for_each_entry_safe() to free each node without having to delete it from the rbtree and without looping to get the first node. Signed-off-by: Filipe Manana <fdmanana@suse.com> Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs/extent-io-tree.c')
-rw-r--r--fs/btrfs/extent-io-tree.c21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
index 9fb4f38347fe..f9f7cf028ffb 100644
--- a/fs/btrfs/extent-io-tree.c
+++ b/fs/btrfs/extent-io-tree.c
@@ -114,14 +114,15 @@ void extent_io_tree_init(struct btrfs_fs_info *fs_info,
*/
void extent_io_tree_release(struct extent_io_tree *tree)
{
- spin_lock(&tree->lock);
- while (!RB_EMPTY_ROOT(&tree->state)) {
- struct rb_node *node;
- struct extent_state *state;
+ struct rb_root root;
+ struct extent_state *state;
+ struct extent_state *tmp;
- node = rb_first(&tree->state);
- state = rb_entry(node, struct extent_state, rb_node);
- rb_erase(&state->rb_node, &tree->state);
+ spin_lock(&tree->lock);
+ root = tree->state;
+ tree->state = RB_ROOT;
+ rbtree_postorder_for_each_entry_safe(state, tmp, &root, rb_node) {
+ /* Clear node to keep free_extent_state() happy. */
RB_CLEAR_NODE(&state->rb_node);
ASSERT(!(state->state & EXTENT_LOCKED));
/*
@@ -131,9 +132,13 @@ void extent_io_tree_release(struct extent_io_tree *tree)
*/
ASSERT(!waitqueue_active(&state->wq));
free_extent_state(state);
-
cond_resched_lock(&tree->lock);
}
+ /*
+ * Should still be empty even after a reschedule, no other task should
+ * be accessing the tree anymore.
+ */
+ ASSERT(RB_EMPTY_ROOT(&tree->state));
spin_unlock(&tree->lock);
}