author     Filipe Manana <fdmanana@suse.com>  2014-11-26 15:28:50 +0000
committer  Chris Mason <clm@fb.com>  2014-12-02 18:19:17 -0800
commit     292cbd51ecf85d73195a3e3193937fa770f6ea71 (patch)
tree       ef651a477536ba72b8cbdf951b823135ca5b3ee4 /fs
parent     9ea24bbe17a29f937e7f48e4b15fd52e89e9d386 (diff)
Btrfs: fix invalid block group rbtree access after bg is removed
If we grab a block group, for example in btrfs_trim_fs(), we will be holding a reference on it, but the block group can be removed after we got it (via btrfs_remove_block_group()), which means it will no longer be part of the rbtree.

However, btrfs_remove_block_group() was only calling rb_erase(), which leaves the block group's rb_node left and right child pointers with the same content they had before calling rb_erase(). This was dangerous because a subsequent call to next_block_group() would access the node's left and right child pointers (via rb_next()), which may no longer be valid.

Fix this by clearing a block group's node after removing it from the tree, and have next_block_group() do a tree search to get the next block group instead of using rb_next() if our block group was removed.

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
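For context, here is a minimal sketch of the pattern the patch relies on, using a hypothetical "item" structure and tree rather than the btrfs types. RB_CLEAR_NODE(), RB_EMPTY_NODE(), rb_erase(), rb_next() and rb_entry() are the real <linux/rbtree.h> helpers; struct item, remove_item(), lookup_first_item() and next_item() are illustrative only:

/*
 * Sketch of the pattern (hypothetical types/helpers, not btrfs code):
 * rb_erase() leaves the erased node's child/parent pointers stale, so
 * a later rb_next() on that node may walk freed or unrelated memory.
 * Clearing the node right after erasing it lets readers detect the
 * removal with RB_EMPTY_NODE() and fall back to a full tree search.
 */
#include <linux/types.h>
#include <linux/rbtree.h>

struct item {
	struct rb_node node;
	u64 start;		/* search key, analogous to key.objectid */
};

static void remove_item(struct rb_root *root, struct item *it)
{
	rb_erase(&it->node, root);
	/* Mark the node as no longer linked into the tree. */
	RB_CLEAR_NODE(&it->node);
}

/* Hypothetical helper: first item with start >= target, or NULL. */
static struct item *lookup_first_item(struct rb_root *root, u64 target)
{
	struct rb_node *n = root->rb_node;
	struct item *ret = NULL;

	while (n) {
		struct item *it = rb_entry(n, struct item, node);

		if (it->start >= target) {
			ret = it;	/* candidate, keep searching left */
			n = n->rb_left;
		} else {
			n = n->rb_right;
		}
	}
	return ret;
}

static struct item *next_item(struct rb_root *root, struct item *it)
{
	struct rb_node *n;

	/* If the item was removed, rb_next() is unsafe; search instead. */
	if (RB_EMPTY_NODE(&it->node))
		return lookup_first_item(root, it->start + 1);

	n = rb_next(&it->node);
	return n ? rb_entry(n, struct item, node) : NULL;
}

The hunks below apply the same idea to btrfs: btrfs_remove_block_group() clears the node immediately after rb_erase(), and next_block_group() checks RB_EMPTY_NODE() before trusting rb_next(), falling back to btrfs_lookup_first_block_group() when the block group is gone from the tree.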
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/extent-tree.c | 13
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b4e3ab115f5f..c21cd85a2e3a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3162,7 +3162,19 @@ next_block_group(struct btrfs_root *root,
struct btrfs_block_group_cache *cache)
{
struct rb_node *node;
+
spin_lock(&root->fs_info->block_group_cache_lock);
+
+ /* If our block group was removed, we need a full search. */
+ if (RB_EMPTY_NODE(&cache->cache_node)) {
+ const u64 next_bytenr = cache->key.objectid + cache->key.offset;
+
+ spin_unlock(&root->fs_info->block_group_cache_lock);
+ btrfs_put_block_group(cache);
+ cache = btrfs_lookup_first_block_group(root->fs_info,
+ next_bytenr);
+ return cache;
+ }
node = rb_next(&cache->cache_node);
btrfs_put_block_group(cache);
if (node) {
@@ -9389,6 +9401,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
spin_lock(&root->fs_info->block_group_cache_lock);
rb_erase(&block_group->cache_node,
&root->fs_info->block_group_cache_tree);
+ RB_CLEAR_NODE(&block_group->cache_node);
if (root->fs_info->first_logical_byte == block_group->key.objectid)
root->fs_info->first_logical_byte = (u64)-1;