summaryrefslogtreecommitdiffstats
path: root/fs/btrfs
diff options
context:
space:
mode:
authorJosef Bacik <josef@toxicpanda.com>2017-09-29 15:43:54 -0400
committerDavid Sterba <dsterba@suse.com>2017-10-30 12:28:00 +0100
commit194ab0bc21cc99c5b804040aa785e0bdcc214656 (patch)
tree7e8503565d76b2d4b84f930c09f50b369baa7bb5 /fs/btrfs
parentb00e62507ec6a9b63d9b3019795584e694085b65 (diff)
downloadlinux-194ab0bc21cc99c5b804040aa785e0bdcc214656.tar.gz
linux-194ab0bc21cc99c5b804040aa785e0bdcc214656.tar.bz2
linux-194ab0bc21cc99c5b804040aa785e0bdcc214656.zip
btrfs: breakout empty head cleanup to a helper
Move this code out to a helper function to further simplify __btrfs_run_delayed_refs. Signed-off-by: Josef Bacik <jbacik@fb.com> Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs')
-rw-r--r--fs/btrfs/extent-tree.c79
1 files changed, 45 insertions, 34 deletions
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e5ed90f20805..5e61e9287b0e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2605,6 +2605,43 @@ static int cleanup_extent_op(struct btrfs_trans_handle *trans,
return ret ? ret : 1;
}
+static int cleanup_ref_head(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_head *head)
+{
+ struct btrfs_delayed_ref_root *delayed_refs;
+ int ret;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+
+ ret = cleanup_extent_op(trans, fs_info, head);
+ if (ret < 0) {
+ unselect_delayed_ref_head(delayed_refs, head);
+ btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
+ return ret;
+ } else if (ret) {
+ return ret;
+ }
+
+ /*
+ * Need to drop our head ref lock and re-acquire the delayed ref lock
+ * and then re-check to make sure nobody got added.
+ */
+ spin_unlock(&head->lock);
+ spin_lock(&delayed_refs->lock);
+ spin_lock(&head->lock);
+ if (!list_empty(&head->ref_list) || head->extent_op) {
+ spin_unlock(&head->lock);
+ spin_unlock(&delayed_refs->lock);
+ return 1;
+ }
+ head->node.in_tree = 0;
+ delayed_refs->num_heads--;
+ rb_erase(&head->href_node, &delayed_refs->href_root);
+ spin_unlock(&delayed_refs->lock);
+ return 0;
+}
+
/*
* Returns 0 on success or if called with an already aborted transaction.
* Returns -ENOMEM or -EIO on failure and will abort the transaction.
@@ -2686,47 +2723,21 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
}
if (!ref) {
-
-
- /* All delayed refs have been processed, Go ahead
- * and send the head node to run_one_delayed_ref,
- * so that any accounting fixes can happen
- */
- ref = &locked_ref->node;
-
- ret = cleanup_extent_op(trans, fs_info, locked_ref);
- if (ret < 0) {
- unselect_delayed_ref_head(delayed_refs,
- locked_ref);
- btrfs_debug(fs_info,
- "run_delayed_extent_op returned %d",
- ret);
- return ret;
- } else if (ret > 0) {
+ ret = cleanup_ref_head(trans, fs_info, locked_ref);
+ if (ret > 0 ) {
/* We dropped our lock, we need to loop. */
ret = 0;
continue;
+ } else if (ret) {
+ return ret;
}
/*
- * Need to drop our head ref lock and re-acquire the
- * delayed ref lock and then re-check to make sure
- * nobody got added.
+ * All delayed refs have been processed, Go ahead and
+ * send the head node to run_one_delayed_ref, so that
+ * any accounting fixes can happen
*/
- spin_unlock(&locked_ref->lock);
- spin_lock(&delayed_refs->lock);
- spin_lock(&locked_ref->lock);
- if (!list_empty(&locked_ref->ref_list) ||
- locked_ref->extent_op) {
- spin_unlock(&locked_ref->lock);
- spin_unlock(&delayed_refs->lock);
- continue;
- }
- ref->in_tree = 0;
- delayed_refs->num_heads--;
- rb_erase(&locked_ref->href_node,
- &delayed_refs->href_root);
- spin_unlock(&delayed_refs->lock);
+ ref = &locked_ref->node;
} else {
actual_count++;
ref->in_tree = 0;